Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -571,6 +571,12 @@ func main() {
// The scheduler client calls the nova external scheduler API to get placement decisions
schedulerClient := reservations.NewSchedulerClient(failoverConfig.SchedulerURL)

failoverMonitor := failover.NewFailoverMonitor()
if err := metrics.Registry.Register(failoverMonitor); err != nil {
setupLog.Error(err, "failed to register failover monitor metrics, continuing without metrics")
failoverMonitor = nil
}

// Defer the initialization of PostgresReader until the manager starts
// because the cache is not ready during setup
if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
Expand All @@ -596,6 +602,7 @@ func main() {
vmSource,
failoverConfig,
schedulerClient,
failoverMonitor,
)

// Set up the watch-based reconciler for per-reservation reconciliation
Expand Down
23 changes: 17 additions & 6 deletions internal/scheduling/reservations/failover/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,17 @@ type FailoverReservationController struct {
Config FailoverConfig
SchedulerClient *reservations.SchedulerClient
Recorder events.EventRecorder // Event recorder for emitting Kubernetes events
reconcileCount int64 // Track reconciliation count for rotating VM selection
Monitor *FailoverMonitor
reconcileCount int64 // Track reconciliation count for rotating VM selection
}

func NewFailoverReservationController(c client.Client, vmSource VMSource, config FailoverConfig, schedulerClient *reservations.SchedulerClient) *FailoverReservationController {
func NewFailoverReservationController(c client.Client, vmSource VMSource, config FailoverConfig, schedulerClient *reservations.SchedulerClient, monitor *FailoverMonitor) *FailoverReservationController {
return &FailoverReservationController{
Client: c,
VMSource: vmSource,
Config: config,
SchedulerClient: schedulerClient,
Monitor: monitor,
}
}

Expand Down Expand Up @@ -229,6 +231,9 @@ func (c *FailoverReservationController) validateReservation(ctx context.Context,

// reconcileSummary holds statistics from the reconciliation cycle.
type reconcileSummary struct {
duration time.Duration
totalVMs int
totalReservations int
vmsMissingFailover int
vmsProcessed int
reservationsNeeded int
Expand Down Expand Up @@ -324,7 +329,9 @@ func (c *FailoverReservationController) ReconcilePeriodic(ctx context.Context) (
summary.totalFailed = assignSummary.totalFailed

// Log summary
duration := time.Since(startTime)
summary.duration = time.Since(startTime)
summary.totalVMs = len(vms)
summary.totalReservations = len(failoverReservations)
Comment on lines +332 to +334
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

totalReservations is underreported after reservation creation.

On Line 334, summary.totalReservations = len(failoverReservations) uses the pre-creation slice size. Since step 6 can create reservations, the logged/recorded total will be too low.

🔧 Proposed fix
-	summary.totalReservations = len(failoverReservations)
+	summary.totalReservations = len(failoverReservations) + summary.totalCreated
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
summary.duration = time.Since(startTime)
summary.totalVMs = len(vms)
summary.totalReservations = len(failoverReservations)
summary.duration = time.Since(startTime)
summary.totalVMs = len(vms)
summary.totalReservations = len(failoverReservations) + summary.totalCreated
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@internal/scheduling/reservations/failover/controller.go` around lines 332 -
334, summary.totalReservations is being set from the pre-creation slice
(failoverReservations) so it underreports when step 6 creates additional
reservations; update the controller so summary.totalReservations reflects the
actual number after creation by recalculating it once reservation creation
completes (e.g., set summary.totalReservations = len(failoverReservations) after
any newly created reservations have been appended, or increment
summary.totalReservations during the creation loop), referencing the
summary.totalReservations and failoverReservations variables in the reservation
creation flow in controller.go.

requeueAfter := c.Config.ReconcileInterval.Duration
successCount := summary.totalCreated + summary.totalReused
madeProgress := successCount >= *c.Config.MinSuccessForShortInterval
Expand All @@ -334,10 +341,10 @@ func (c *FailoverReservationController) ReconcilePeriodic(ctx context.Context) (
}
logger.Info("periodic reconciliation completed",
"reconcileCount", c.reconcileCount,
"duration", duration.Round(time.Millisecond),
"duration", summary.duration.Round(time.Millisecond),
"requeueAfter", requeueAfter,
"totalVMs", len(vms),
"totalReservations", len(failoverReservations),
"totalVMs", summary.totalVMs,
"totalReservations", summary.totalReservations,
"vmsMissingFailover", summary.vmsMissingFailover,
"vmsProcessed", summary.vmsProcessed,
"reservationsNeeded", summary.reservationsNeeded,
Expand All @@ -347,6 +354,10 @@ func (c *FailoverReservationController) ReconcilePeriodic(ctx context.Context) (
"updated", summary.reservationsUpdated,
"deleted", summary.reservationsDeleted)

if c.Monitor != nil {
c.Monitor.RecordReconciliation(summary, "")
}

return ctrl.Result{RequeueAfter: requeueAfter}, nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -693,6 +693,7 @@ func (env *IntegrationTestEnv) TriggerFailoverReconcile(flavorRequirements map[s
env.VMSource,
config,
schedulerClient,
nil,
)

_, err := controller.ReconcilePeriodic(context.Background())
Expand Down
150 changes: 150 additions & 0 deletions internal/scheduling/reservations/failover/monitor.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
// Copyright SAP SE
// SPDX-License-Identifier: Apache-2.0

package failover

import (
"github.com/prometheus/client_golang/prometheus"
)

// azLabel is the single label set shared by every metric vector below; an
// empty availability_zone value ("") is used for the aggregate series.
var azLabel = []string{"availability_zone"}

// FailoverMonitor provides Prometheus metrics for the failover reconciliation controller.
// It implements prometheus.Collector (see Describe/Collect) so it can be
// registered as a single unit with a metrics registry.
type FailoverMonitor struct {
	// Per-run counters and timing.
	reconciliationRuns     *prometheus.CounterVec
	reconciliationDuration *prometheus.HistogramVec
	// Gauges reflecting the state observed during the most recent cycle only.
	totalVMs           *prometheus.GaugeVec
	totalReservations  *prometheus.GaugeVec
	vmsMissingFailover *prometheus.GaugeVec
	// Cumulative counters across all cycles since pod restart.
	vmsProcessed        *prometheus.CounterVec
	reservationsNeeded  *prometheus.CounterVec
	reservationsReused  *prometheus.CounterVec
	reservationsCreated *prometheus.CounterVec
	reservationsFailed  *prometheus.CounterVec
	reservationsUpdated *prometheus.CounterVec
	reservationsDeleted *prometheus.CounterVec
}

// NewFailoverMonitor creates a new monitor with Prometheus metrics.
func NewFailoverMonitor() *FailoverMonitor {
m := &FailoverMonitor{
reconciliationRuns: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_runs_total",
Help: "Total number of failover periodic reconciliation runs since pod restart",
}, azLabel),
reconciliationDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "cortex_failover_reconciliation_duration_seconds",
Help: "Duration of failover periodic reconciliation cycles",
Buckets: []float64{0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60},
}, azLabel),
totalVMs: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "cortex_failover_reconciliation_total_vms",
Help: "Total number of VMs seen during the last reconciliation",
}, azLabel),
totalReservations: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "cortex_failover_reconciliation_total_reservations",
Help: "Total number of failover reservations during the last reconciliation",
}, azLabel),
vmsMissingFailover: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "cortex_failover_reconciliation_vms_missing_failover",
Help: "Number of VMs missing required failover reservations during the last reconciliation",
}, azLabel),
vmsProcessed: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_vms_processed_total",
Help: "Total number of VMs processed across all reconciliation cycles since pod restart",
}, azLabel),
reservationsNeeded: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_needed_total",
Help: "Total number of reservations needed across all reconciliation cycles since pod restart",
}, azLabel),
reservationsReused: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_reused_total",
Help: "Total number of reservations reused across all reconciliation cycles since pod restart",
}, azLabel),
reservationsCreated: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_created_total",
Help: "Total number of reservations created across all reconciliation cycles since pod restart",
}, azLabel),
reservationsFailed: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_failed_total",
Help: "Total number of failed reservation attempts across all reconciliation cycles since pod restart",
}, azLabel),
reservationsUpdated: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_updated_total",
Help: "Total number of reservation allocation updates across all reconciliation cycles since pod restart",
}, azLabel),
reservationsDeleted: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "cortex_failover_reconciliation_reservations_deleted_total",
Help: "Total number of empty reservations deleted across all reconciliation cycles since pod restart",
}, azLabel),
}

// Pre-initialize the aggregate label so metrics appear even before the first reconciliation.
m.preInitialize("")

return m
}

// preInitialize touches every child metric for the given availability zone so
// that the corresponding time series exist (with zero values) immediately,
// rather than only after the first reconciliation records data.
func (m *FailoverMonitor) preInitialize(az string) {
	// WithLabelValues lazily creates the child; the returned handles are
	// intentionally discarded — creation is the only effect we want here.
	_ = m.reconciliationRuns.WithLabelValues(az)
	_ = m.reconciliationDuration.WithLabelValues(az)
	_ = m.totalVMs.WithLabelValues(az)
	_ = m.totalReservations.WithLabelValues(az)
	_ = m.vmsMissingFailover.WithLabelValues(az)
	_ = m.vmsProcessed.WithLabelValues(az)
	_ = m.reservationsNeeded.WithLabelValues(az)
	_ = m.reservationsReused.WithLabelValues(az)
	_ = m.reservationsCreated.WithLabelValues(az)
	_ = m.reservationsFailed.WithLabelValues(az)
	_ = m.reservationsUpdated.WithLabelValues(az)
	_ = m.reservationsDeleted.WithLabelValues(az)
}

// RecordReconciliation records all metrics from a single reconciliation cycle.
// The availabilityZone parameter allows future per-AZ reporting; pass "" for aggregate.
// RecordReconciliation records all metrics from a single reconciliation cycle.
// The availabilityZone parameter allows future per-AZ reporting; pass "" for aggregate.
func (m *FailoverMonitor) RecordReconciliation(summary reconcileSummary, availabilityZone string) {
	az := availabilityZone

	// One run completed: bump the run counter and observe how long it took.
	m.reconciliationRuns.WithLabelValues(az).Inc()
	m.reconciliationDuration.WithLabelValues(az).Observe(summary.duration.Seconds())

	// Gauges are overwritten each cycle — they describe the latest state only.
	m.totalVMs.WithLabelValues(az).Set(float64(summary.totalVMs))
	m.totalReservations.WithLabelValues(az).Set(float64(summary.totalReservations))
	m.vmsMissingFailover.WithLabelValues(az).Set(float64(summary.vmsMissingFailover))

	// Counters accumulate across cycles for rate-style queries.
	m.vmsProcessed.WithLabelValues(az).Add(float64(summary.vmsProcessed))
	m.reservationsNeeded.WithLabelValues(az).Add(float64(summary.reservationsNeeded))
	m.reservationsReused.WithLabelValues(az).Add(float64(summary.totalReused))
	m.reservationsCreated.WithLabelValues(az).Add(float64(summary.totalCreated))
	m.reservationsFailed.WithLabelValues(az).Add(float64(summary.totalFailed))
	m.reservationsUpdated.WithLabelValues(az).Add(float64(summary.reservationsUpdated))
	m.reservationsDeleted.WithLabelValues(az).Add(float64(summary.reservationsDeleted))
}

// Describe implements prometheus.Collector.
func (m *FailoverMonitor) Describe(ch chan<- *prometheus.Desc) {
m.reconciliationRuns.Describe(ch)
m.reconciliationDuration.Describe(ch)
m.totalVMs.Describe(ch)
m.totalReservations.Describe(ch)
m.vmsMissingFailover.Describe(ch)
m.vmsProcessed.Describe(ch)
m.reservationsNeeded.Describe(ch)
m.reservationsReused.Describe(ch)
m.reservationsCreated.Describe(ch)
m.reservationsFailed.Describe(ch)
m.reservationsUpdated.Describe(ch)
m.reservationsDeleted.Describe(ch)
}

// Collect implements prometheus.Collector.
func (m *FailoverMonitor) Collect(ch chan<- prometheus.Metric) {
m.reconciliationRuns.Collect(ch)
m.reconciliationDuration.Collect(ch)
m.totalVMs.Collect(ch)
m.totalReservations.Collect(ch)
m.vmsMissingFailover.Collect(ch)
m.vmsProcessed.Collect(ch)
m.reservationsNeeded.Collect(ch)
m.reservationsReused.Collect(ch)
m.reservationsCreated.Collect(ch)
m.reservationsFailed.Collect(ch)
m.reservationsUpdated.Collect(ch)
m.reservationsDeleted.Collect(ch)
}
Loading
Loading