chore(operator): Make operator a bit chattier
The `operator` can generate quite a bit of load on the DB, so it should
log at info level while it is working.
silvestre committed Jan 18, 2024
1 parent d574016 commit 4bcc8c0
Showing 7 changed files with 35 additions and 24 deletions.
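
For context, the pattern this commit applies to each of the operator's Operate implementations is: open a per-run lager session, emit an info-level "starting" entry, and defer an info-level "completed" entry so every pass over the DB shows up in the logs. Below is a minimal, self-contained sketch of that pattern; the examplePruner type, the session names, and the main() wiring are illustrative stand-ins rather than the actual autoscaler types.

package main

import (
    "context"
    "os"
    "time"

    "code.cloudfoundry.org/lager/v3"
)

// examplePruner is a hypothetical stand-in for the operator's DB pruners.
type examplePruner struct {
    cutoffDuration time.Duration
    logger         lager.Logger
}

func (p examplePruner) Operate(ctx context.Context) {
    cutoff := time.Now().Add(-p.cutoffDuration).UnixNano()

    // One session per run, carrying the cutoff, plus info-level
    // "starting"/"completed" markers: the pattern added by this commit.
    logger := p.logger.Session("pruning-example", lager.Data{"cutoff-time": cutoff})
    logger.Info("starting")
    defer logger.Info("completed")

    // ... prune rows older than cutoff via ctx-aware DB calls ...
    _ = ctx
}

func main() {
    logger := lager.NewLogger("operator-example")
    logger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))

    p := examplePruner{cutoffDuration: 24 * time.Hour, logger: logger.Session("example_db_pruner")}
    p.Operate(context.Background())
}

Because the cutoff is attached to the session, both the "starting" and "completed" entries carry it automatically, without repeating it on every call.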
16 changes: 10 additions & 6 deletions src/autoscaler/operator/applicationsyncer.go
@@ -15,20 +15,24 @@ type Operator interface {
 var _ Operator = &ApplicationSynchronizer{}

 type ApplicationSynchronizer struct {
-    cfClient cf.CFClient
+    cfClient cf.ContextClient
     policyDb db.PolicyDB
     logger   lager.Logger
 }

-func NewApplicationSynchronizer(cfClient cf.CFClient, policyDb db.PolicyDB, logger lager.Logger) *ApplicationSynchronizer {
+func NewApplicationSynchronizer(cfClient cf.ContextClient, policyDb db.PolicyDB, logger lager.Logger) *ApplicationSynchronizer {
     return &ApplicationSynchronizer{
         policyDb: policyDb,
         cfClient: cfClient,
-        logger:   logger,
+        logger:   logger.Session("application-synchronizer"),
     }
 }

 func (as ApplicationSynchronizer) Operate(ctx context.Context) {
+    logger := as.logger.Session("syncing-apps")
+    logger.Info("starting")
+    defer logger.Info("completed")
+
     // Get all the application details from policyDB
     appIds, err := as.policyDb.GetAppIds(ctx)
     if err != nil {
@@ -37,18 +41,18 @@ func (as ApplicationSynchronizer) Operate(ctx context.Context) {
     }
     // For each app check if they really exist or not via CC api call
     for appID := range appIds {
-        _, err = as.cfClient.GetApp(cf.Guid(appID))
+        _, err = as.cfClient.GetApp(ctx, cf.Guid(appID))
         if err != nil {
             as.logger.Error("failed-to-get-app-info", err)
             if cf.IsNotFound(err) {
                 // Application does not exist, lets clean up app details from policyDB
-                err = as.policyDb.DeletePolicy(context.Background(), appID)
+                err = as.policyDb.DeletePolicy(ctx, appID)
                 if err != nil {
                     as.logger.Error("failed-to-prune-non-existent-application-details", err)
                     //TODO make this a continue and write a test.
                     return
                 }
-                as.logger.Info("successfully-pruned-non-existent-applcation", lager.Data{"appid": appID})
+                as.logger.Info("successfully-pruned-non-existent-application", lager.Data{"appid": appID})
             }
         }
     }
4 changes: 2 additions & 2 deletions src/autoscaler/operator/applicationsyncer_test.go
@@ -16,13 +16,13 @@ import (
 var _ = Describe("AppSynchronizer", func() {
     var (
         appSynchronizer *operator.ApplicationSynchronizer
-        cfc             *fakes.FakeCFClient
+        cfc             *fakes.FakeContextClient
         policyDB        *fakes.FakePolicyDB
     )

     BeforeEach(func() {
         logger := lagertest.NewTestLogger("application-synchoronizer-test")
-        cfc = &fakes.FakeCFClient{}
+        cfc = &fakes.FakeContextClient{}
         policyDB = &fakes.FakePolicyDB{}
         appSynchronizer = operator.NewApplicationSynchronizer(cfc, policyDB, logger)
     })
9 changes: 5 additions & 4 deletions src/autoscaler/operator/appmetricsdb.go
@@ -5,7 +5,6 @@ import (
     "time"

     "code.cloudfoundry.org/app-autoscaler/src/autoscaler/db"
-
     "code.cloudfoundry.org/clock"
     "code.cloudfoundry.org/lager/v3"
 )
@@ -22,15 +21,17 @@ func NewAppMetricsDbPruner(appMetricsDb db.AppMetricDB, cutoffDuration time.Dura
         appMetricsDb:   appMetricsDb,
         cutoffDuration: cutoffDuration,
         clock:          clock,
-        logger:         logger,
+        logger:         logger.Session("app_metrics_db_pruner"),
     }
 }

 func (amdp AppMetricsDbPruner) Operate(ctx context.Context) {
-    amdp.logger.Debug("Pruning app metrics")
-
     timestamp := amdp.clock.Now().Add(-amdp.cutoffDuration).UnixNano()

+    logger := amdp.logger.Session("pruning-app-metrics", lager.Data{"cutoff-time": timestamp})
+    logger.Info("starting")
+    defer logger.Info("completed")
+
     err := amdp.appMetricsDb.PruneAppMetrics(ctx, timestamp)
     if err != nil {
         amdp.logger.Error("failed-prune-appmetrics", err)
2 changes: 1 addition & 1 deletion src/autoscaler/operator/cmd/operator/main.go
@@ -124,7 +124,7 @@ func main() {
     schedulerSyncRunner := operator.NewOperatorRunner(schedulerSync, conf.Scheduler.SyncInterval, prClock, logger.Session(loggerSessionName))

     loggerSessionName = "application-sync"
-    applicationSync := operator.NewApplicationSynchronizer(cfClient, policyDb, logger.Session(loggerSessionName))
+    applicationSync := operator.NewApplicationSynchronizer(cfClient.GetCtxClient(), policyDb, logger.Session(loggerSessionName))
     applicationSyncRunner := operator.NewOperatorRunner(applicationSync, conf.AppSyncer.SyncInterval, prClock, logger.Session(loggerSessionName))

     members := grouper.Members{
10 changes: 6 additions & 4 deletions src/autoscaler/operator/instancemetricsdb.go
@@ -5,7 +5,6 @@ import (
     "time"

     "code.cloudfoundry.org/app-autoscaler/src/autoscaler/db"
-
     "code.cloudfoundry.org/clock"
     "code.cloudfoundry.org/lager/v3"
 )
@@ -22,14 +21,17 @@ func NewInstanceMetricsDbPruner(instanceMetricsDb db.InstanceMetricsDB, cutoffDu
         instanceMetricsDb: instanceMetricsDb,
         cutoffDuration:    cutoffDuration,
         clock:             clock,
-        logger:            logger,
+        logger:            logger.Session("instance_metrics_db_pruner"),
     }
 }

 func (idp InstanceMetricsDbPruner) Operate(ctx context.Context) {
-    idp.logger.Debug("Pruning instance metrics")
-
     timestamp := idp.clock.Now().Add(-idp.cutoffDuration).UnixNano()
+
+    logger := idp.logger.Session("pruning-instance-metrics", lager.Data{"cutoff-time": timestamp})
+    logger.Info("starting")
+    defer logger.Info("completed")
+
     err := idp.instanceMetricsDb.PruneInstanceMetrics(ctx, timestamp)
     if err != nil {
         idp.logger.Error("failed-prune-metrics", err)
10 changes: 6 additions & 4 deletions src/autoscaler/operator/scalingenginedb.go
@@ -5,7 +5,6 @@ import (
     "time"

     "code.cloudfoundry.org/app-autoscaler/src/autoscaler/db"
-
     "code.cloudfoundry.org/clock"
     "code.cloudfoundry.org/lager/v3"
 )
@@ -22,14 +21,17 @@ func NewScalingEngineDbPruner(scalingEngineDb db.ScalingEngineDB, cutoffDuration
         scalingEngineDb: scalingEngineDb,
         cutoffDuration:  cutoffDuration,
         clock:           clock,
-        logger:          logger,
+        logger:          logger.Session("scaling_engine_db_pruner"),
     }
 }

 func (sdp ScalingEngineDbPruner) Operate(ctx context.Context) {
-    sdp.logger.Debug("Pruning scaling histories")
-
     timestamp := sdp.clock.Now().Add(-sdp.cutoffDuration).UnixNano()
+
+    logger := sdp.logger.Session("pruning-instance-metrics", lager.Data{"cutoff-time": timestamp})
+    logger.Info("starting")
+    defer logger.Info("completed")
+
     err := sdp.scalingEngineDb.PruneScalingHistories(ctx, timestamp)
     if err != nil {
         sdp.logger.Error("failed-prune-scaling-histories", err)
8 changes: 5 additions & 3 deletions src/autoscaler/operator/schedulersynchronizer.go
@@ -2,7 +2,6 @@ package operator

 import (
     "context"
-    "fmt"
     "net/http"

     "code.cloudfoundry.org/app-autoscaler/src/autoscaler/routes"
@@ -23,13 +22,16 @@ func NewScheduleSynchronizer(client *http.Client, url string, clock clock.Clock,
         client: client,
         url:    url,
         clock:  clock,
-        logger: logger,
+        logger: logger.Session("schedule_synchronizer"),
     }
 }

 func (s ScheduleSynchronizer) Operate(ctx context.Context) {
     syncURL := s.url + routes.SyncActiveSchedulesPath
-    s.logger.Debug(fmt.Sprintf("Sync schedules of %s", syncURL))

+    logger := s.logger.Session("syncing-schedules", lager.Data{"sync-url": syncURL})
+    logger.Info("starting")
+    defer logger.Info("completed")
+
     req, err := http.NewRequestWithContext(ctx, "PUT", syncURL, nil)
     if err != nil {
