diff --git a/ci/autoscaler/scripts/deploy-autoscaler.sh b/ci/autoscaler/scripts/deploy-autoscaler.sh
index 5b15eb3008..6142fd9c99 100755
--- a/ci/autoscaler/scripts/deploy-autoscaler.sh
+++ b/ci/autoscaler/scripts/deploy-autoscaler.sh
@@ -47,6 +47,13 @@ function setup_autoscaler_uaac(){
   local uaac_authorities="cloud_controller.read,cloud_controller.admin,uaa.resource,routing.routes.write,routing.routes.read,routing.router_groups.read"
   local autoscaler_secret="autoscaler_client_secret"
   local uaa_client_secret=$(credhub get -n /bosh-autoscaler/cf/uaa_admin_client_secret --quiet)
 
+  # Check if uaac gem is installed
+  #
+  if ! gem list -i "cf-uaac" > /dev/null; then
+    echo "cf-uaac gem is not installed. Please install it by running 'gem install cf-uaac'"
+    exit 1
+  fi
+
   uaac target "https://uaa.${system_domain}" --skip-ssl-validation > /dev/null
   uaac token client get admin -s "${uaa_client_secret}" > /dev/null
diff --git a/jobs/eventgenerator/spec b/jobs/eventgenerator/spec
index dc816f076e..8a58d7c719 100644
--- a/jobs/eventgenerator/spec
+++ b/jobs/eventgenerator/spec
@@ -223,9 +223,6 @@ properties:
     description: "Number of consecutive failure to trip the circuit down"
     default: 3
 
-  autoscaler.eventgenerator.health.port:
-    description: "the listening port of health endpoint"
-    default: 6204
   autoscaler.eventgenerator.health.ca_cert:
     description: "PEM-encoded CA certificate for the health endpoint"
   autoscaler.eventgenerator.health.server_cert:
diff --git a/jobs/operator/spec b/jobs/operator/spec
index ba529df29d..5cb7c8cbd7 100644
--- a/jobs/operator/spec
+++ b/jobs/operator/spec
@@ -301,8 +301,8 @@ properties:
     description: "lock db retry interval"
     default: 5s
 
-  autoscaler.operator.health.port:
-    description: "the listening port of health endpoint"
+  autoscaler.operator.server.port:
+    description: "the listening port of server"
     default: 6208
   autoscaler.operator.health.ca_cert:
     description: "PEM-encoded CA certificate for the health endpoint"
diff --git a/jobs/scalingengine/spec b/jobs/scalingengine/spec
index 93e06c340c..5c61f3430f 100644
--- a/jobs/scalingengine/spec
+++ b/jobs/scalingengine/spec
@@ -152,7 +152,6 @@ properties:
   autoscaler.cf.max_idle_conns_per_host_ms:
     description: "Controls the maximum idle (keep-alive) connections to keep pooled per-host. If zero, golang default (2) is used. A negative value will mean no idle connection pool reuse."
     default: 200
-
   autoscaler.scalingengine.logging.level:
     description: "the level of logging"
     default: "info"
@@ -168,11 +167,6 @@ properties:
     description: "PEM-encoded server certificate"
   autoscaler.scalingengine.server_key:
     description: "PEM-encoded server key"
-
-
-  autoscaler.scalingengine.health.port:
-    description: "the listening port of health endpoint"
-    default: 6204
   autoscaler.scalingengine.health.ca_cert:
     description: "PEM-encoded CA certificate for the health endpoint"
   autoscaler.scalingengine.health.server_cert:
diff --git a/src/autoscaler/healthendpoint/server.go b/src/autoscaler/healthendpoint/server.go
index 4e4ca9272e..4a7ef2d77b 100644
--- a/src/autoscaler/healthendpoint/server.go
+++ b/src/autoscaler/healthendpoint/server.go
@@ -36,7 +36,6 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/tedsuo/ifrit"
 	"golang.org/x/crypto/bcrypt"
 )
 
@@ -59,20 +58,6 @@ func (bam *basicAuthenticationMiddleware) middleware(next http.Handler) http.Han
 	})
 }
 
-// NewServerWithBasicAuth open the healthcheck port with basic authentication.
-// Make sure that username and password is not empty
-func NewServerWithBasicAuth(conf helpers.HealthConfig, healthCheckers []Checker, logger lager.Logger, gatherer prometheus.Gatherer, time func() time.Time) (ifrit.Runner, error) {
-	healthRouter, err := NewHealthRouter(conf, healthCheckers, logger, gatherer, time)
-	if err != nil {
-		return nil, err
-	}
-	httpServerConfig := helpers.ServerConfig{
-		Port: conf.Port,
-		TLS:  conf.TLS,
-	}
-	return helpers.NewHTTPServer(logger, httpServerConfig, healthRouter)
-}
-
 func NewHealthRouter(conf helpers.HealthConfig, healthCheckers []Checker, logger lager.Logger, gatherer prometheus.Gatherer, time func() time.Time) (*mux.Router, error) {
 	var healthRouter *mux.Router
 	var err error
diff --git a/src/autoscaler/helpers/health.go b/src/autoscaler/helpers/health.go
index 6631359ba7..bfb9ced0e0 100644
--- a/src/autoscaler/helpers/health.go
+++ b/src/autoscaler/helpers/health.go
@@ -7,7 +7,6 @@ import (
 )
 
 type HealthConfig struct {
-	ServerConfig            `yaml:",inline"`
 	HealthCheckUsername     string `yaml:"username"`
 	HealthCheckUsernameHash string `yaml:"username_hash"`
 	HealthCheckPassword     string `yaml:"password"`
diff --git a/src/autoscaler/operator/cmd/operator/main.go b/src/autoscaler/operator/cmd/operator/main.go
index 4118f735df..948a9bdcab 100644
--- a/src/autoscaler/operator/cmd/operator/main.go
+++ b/src/autoscaler/operator/cmd/operator/main.go
@@ -144,7 +144,17 @@ func main() {
 	})
 	members = append(grouper.Members{{"db-lock-maintainer", dbLockMaintainer}}, members...)
 
-	healthServer, err := healthendpoint.NewServerWithBasicAuth(conf.Health, []healthendpoint.Checker{}, logger.Session("health-server"), promRegistry, time.Now)
+	healthRouter, err := healthendpoint.NewHealthRouter(conf.Health, []healthendpoint.Checker{}, logger.Session("health-server"), promRegistry, time.Now)
+	if err != nil {
+		logger.Error("failed to create health router", err)
+		os.Exit(1)
+	}
+	httpServerConfig := helpers.ServerConfig{
+		Port: conf.Server.Port,
+		TLS:  conf.Server.TLS,
+	}
+
+	healthServer, err := helpers.NewHTTPServer(logger, httpServerConfig, healthRouter)
 	if err != nil {
 		logger.Error("failed to create health server", err)
 		os.Exit(1)
diff --git a/templates/app-autoscaler.yml b/templates/app-autoscaler.yml
index a1a0e3aeb3..4e55c807d7 100644
--- a/templates/app-autoscaler.yml
+++ b/templates/app-autoscaler.yml
@@ -190,9 +190,10 @@ instance_groups:
           client_id: "((cf_client_id))"
           secret: "((cf_client_secret))"
           skip_ssl_validation: "((skip_ssl_validation))"
         scalingengine:
+          server:
+            port: &scalingEngineServerPort 6104
           health:
-            port: &scalingEngineHealthPort 6204
             username: scalingengine
             password: ((autoscaler_scalingengine_health_password))
           logging:
@@ -209,7 +210,7 @@
        routes:
        - name: autoscaler_scalingengine_health
          registration_interval: 20s
-         port: *scalingEngineHealthPort
+         port: *scalingEngineServerPort
          tags:
            component: autoscaler_scalingengine_health
          uris:
@@ -377,9 +378,10 @@
         scheduler.host: *scheduler_domain
         cf: *cf_credentials
         operator:
+          server:
+            port: &operatorServerPort 6208
           enable_db_lock: true
           health:
-            port: &operatorHealthPort 6208
             username: operator
             password: ((autoscaler_operator_health_password))
           scaling_engine:
@@ -403,7 +405,7 @@
        routes:
        - name: autoscaler_operator_health
          registration_interval: 20s
-         port: *operatorHealthPort
+         port: *operatorServerPort
          tags:
            component: autoscaler_operator_health
          uris:
@@ -429,8 +431,9 @@ instance_groups:
         policy_db_connection_config: *databaseConnectionConfig
         lock_db: *database
         eventgenerator:
+          server:
+            port: &eventGeneratorServerPort 6105
           health:
-            port: &eventGeneratorHealthPort 6205
             username: eventgenerator
             password: ((autoscaler_eventgenerator_health_password))
           logging:
@@ -459,7 +462,7 @@
        routes:
        - name: autoscaler_eventgenerator_health
          registration_interval: 20s
-         port: *eventGeneratorHealthPort
+         port: *eventGeneratorServerPort
          tags:
            component: autoscaler_eventgenerator_health
          uris:
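
Note (illustrative, not part of the patch): with healthendpoint.NewServerWithBasicAuth removed and the inline ServerConfig dropped from helpers.HealthConfig, each component now builds the health router itself and serves it on its main server port, which is why the BOSH templates above re-point the route_registrar health routes at the *ServerPort anchors. The sketch below captures that wiring using only functions that appear in this diff (healthendpoint.NewHealthRouter, helpers.NewHTTPServer); the package name, the helper name NewHealthServer, and the plain code.cloudfoundry.org/lager import path (the tree may use lager/v3) are assumptions.

// Package healthwiring is a hypothetical example package, not part of the release.
package healthwiring

import (
	"time"

	"code.cloudfoundry.org/app-autoscaler/src/autoscaler/healthendpoint"
	"code.cloudfoundry.org/app-autoscaler/src/autoscaler/helpers"
	"code.cloudfoundry.org/lager"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/tedsuo/ifrit"
)

// NewHealthServer builds the basic-auth/prometheus health router and mounts it
// on the port described by serverConf (e.g. the operator's conf.Server), taking
// the place of the removed healthendpoint.NewServerWithBasicAuth.
func NewHealthServer(serverConf helpers.ServerConfig, healthConf helpers.HealthConfig, logger lager.Logger, gatherer prometheus.Gatherer) (ifrit.Runner, error) {
	// Assemble the /health routes; healthConf now carries only credentials, no port.
	healthRouter, err := healthendpoint.NewHealthRouter(healthConf, []healthendpoint.Checker{}, logger.Session("health-server"), gatherer, time.Now)
	if err != nil {
		return nil, err
	}
	// Serve the router on the component's main server port rather than a dedicated health port.
	return helpers.NewHTTPServer(logger, serverConf, healthRouter)
}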