Skip to content

Commit

Permalink
Create workers for a cluster based on the count
Browse files Browse the repository at this point in the history
	Use the worker_num attribute to create workers based on the count.
	The worker_num attribute conflicts with the workers attribute.
  • Loading branch information
Praveengostu authored and hkantare committed Apr 30, 2018
1 parent 72a5e57 commit 80ec9cc
Show file tree
Hide file tree
Showing 4 changed files with 311 additions and 11 deletions.
95 changes: 87 additions & 8 deletions ibm/resource_ibm_container_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,9 @@ func resourceIBMContainerCluster() *schema.Resource {
Description: "The datacenter where this cluster will be deployed",
},
"workers": {
Type: schema.TypeList,
Required: true,
Type: schema.TypeList,
Optional: true,
ConflictsWith: []string{"worker_num"},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Expand All @@ -77,6 +78,22 @@ func resourceIBMContainerCluster() *schema.Resource {
},
},

"worker_num": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: "Number of worker nodes",
ConflictsWith: []string{"workers"},
ValidateFunc: validateWorkerNum,
},

"workers_info": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: "The IDs of the worker node",
},

"disk_encryption": {
Type: schema.TypeBool,
Optional: true,
Expand Down Expand Up @@ -140,10 +157,7 @@ func resourceIBMContainerCluster() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"worker_num": {
Type: schema.TypeInt,
Computed: true,
},

"subnet_id": {
Type: schema.TypeSet,
Optional: true,
Expand Down Expand Up @@ -212,7 +226,6 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{})

name := d.Get("name").(string)
datacenter := d.Get("datacenter").(string)
workers := d.Get("workers").([]interface{})
billing := d.Get("billing").(string)
machineType := d.Get("machine_type").(string)
publicVlanID := d.Get("public_vlan_id").(string)
Expand All @@ -221,11 +234,26 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{})
noSubnet := d.Get("no_subnet").(bool)
isolation := d.Get("isolation").(string)
diskEncryption := d.Get("disk_encryption").(bool)
var workers []interface{}
var workerNum int
if v, ok := d.GetOk("workers"); ok {
workers = v.([]interface{})
workerNum = len(workers)
}

if v, ok := d.GetOk("worker_num"); ok {
workerNum = v.(int)
}

if workerNum == 0 {
return fmt.Errorf(
"Please set either the wokers with valid array or worker_num with value grater than 0")
}

params := v1.ClusterCreateRequest{
Name: name,
Datacenter: datacenter,
WorkerNum: len(workers),
WorkerNum: workerNum,
Billing: billing,
MachineType: machineType,
PublicVlan: publicVlanID,
Expand Down Expand Up @@ -333,6 +361,7 @@ func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) e
if err != nil {
return err
}
wrkAPI := csClient.Workers()

targetEnv := getClusterTargetHeader(d)

Expand All @@ -342,12 +371,22 @@ func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) e
return fmt.Errorf("Error retrieving armada cluster: %s", err)
}

workerFields, err := wrkAPI.List(clusterID, targetEnv)
if err != nil {
return fmt.Errorf("Error retrieving workers for cluster: %s", err)
}
workers := make([]string, len(workerFields))
for i, worker := range workerFields {
workers[i] = worker.ID
}

d.Set("name", cls.Name)
d.Set("server_url", cls.ServerURL)
d.Set("ingress_hostname", cls.IngressHostname)
d.Set("ingress_secret", cls.IngressSecretName)
d.Set("worker_num", cls.WorkerCount)
d.Set("subnet_id", d.Get("subnet_id").(*schema.Set))
d.Set("workers_info", workers)
d.Set("kube_version", strings.Split(cls.MasterKubeVersion, "_")[0])
return nil
}
Expand Down Expand Up @@ -389,6 +428,46 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{})
}

workersInfo := []map[string]string{}
if d.HasChange("worker_num") {
old, new := d.GetChange("worker_num")
oldCount := old.(int)
newCount := new.(int)
if newCount > oldCount {
count := newCount - oldCount
machineType := d.Get("machine_type").(string)
publicVlanID := d.Get("public_vlan_id").(string)
privateVlanID := d.Get("private_vlan_id").(string)
isolation := d.Get("isolation").(string)
params := v1.WorkerParam{
WorkerNum: count,
MachineType: machineType,
PublicVlan: publicVlanID,
PrivateVlan: privateVlanID,
Isolation: isolation,
}
wrkAPI.Add(clusterID, params, targetEnv)
} else if oldCount > newCount {
count := oldCount - newCount
workerFields, err := wrkAPI.List(clusterID, targetEnv)
if err != nil {
return fmt.Errorf("Error retrieving workers for cluster: %s", err)
}
for i := 0; i < count; i++ {
err := wrkAPI.Delete(clusterID, workerFields[i].ID, targetEnv)
if err != nil {
return fmt.Errorf(
"Error deleting workers of cluster (%s): %s", d.Id(), err)
}
}
}

_, err = WaitForWorkerAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
"Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err)
}

}
if d.HasChange("workers") {
oldWorkers, newWorkers := d.GetChange("workers")
oldWorker := oldWorkers.([]interface{})
Expand Down
186 changes: 186 additions & 0 deletions ibm/resource_ibm_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package ibm
import (
"fmt"
"log"
"regexp"
"strings"
"testing"

Expand Down Expand Up @@ -58,6 +59,69 @@ func TestAccIBMContainerCluster_basic(t *testing.T) {
})
}

// TestAccIBMContainerCluster_worker_count provisions a cluster using the
// worker_num attribute and then scales it from one worker to two, verifying
// worker_num and workers_info after each apply.
func TestAccIBMContainerCluster_worker_count(t *testing.T) {
	clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt())
	// All checks target the same resource instance.
	resourceName := "ibm_container_cluster.testacc_cluster"
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckIBMContainerClusterDestroy,
		Steps: []resource.TestStep{
			{
				// Step 1: create the cluster with worker_num = 1.
				Config: testAccCheckIBMContainerCluster_worker_count(clusterName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "name", clusterName),
					resource.TestCheckResourceAttr(resourceName, "worker_num", "1"),
					resource.TestCheckResourceAttr(resourceName, "workers_info.#", "1"),
				),
			},
			{
				// Step 2: scale up to worker_num = 2.
				Config: testAccCheckIBMContainerCluster_worker_count_update(clusterName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "name", clusterName),
					resource.TestCheckResourceAttr(resourceName, "worker_num", "2"),
					resource.TestCheckResourceAttr(resourceName, "workers_info.#", "2"),
				),
			},
		},
	})
}

// TestAccIBMContainerCluster_without_workers_worker_num verifies that a
// configuration setting neither workers nor worker_num is rejected with the
// create-time validation error.
func TestAccIBMContainerCluster_without_workers_worker_num(t *testing.T) {
	clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt())
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckIBMContainerClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckIBMContainerCluster_without_workers_worker_num(clusterName),
				// NOTE(review): the pattern intentionally matches the provider's
				// current (misspelled) error message — keep the two in sync.
				ExpectError: regexp.MustCompile(" Please set either the wokers with valid array"),
			},
		},
	})
}

// TestAccIBMContainerCluster_with_worker_num_zero verifies that worker_num = 0
// is rejected by the schema-level validator.
func TestAccIBMContainerCluster_with_worker_num_zero(t *testing.T) {
	clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt())
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckIBMContainerClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config:      testAccCheckIBMContainerCluster_with_worker_num_zero(clusterName),
				ExpectError: regexp.MustCompile("must be greater than 0"),
			},
		},
	})
}

func TestAccIBMContainerCluster_diskEnc(t *testing.T) {
clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
Expand Down Expand Up @@ -296,6 +360,66 @@ resource "ibm_container_cluster" "testacc_cluster" {
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}

// testAccCheckIBMContainerCluster_without_workers_worker_num returns an HCL
// configuration that sets neither the workers list nor worker_num, used to
// exercise the provider's create-time validation error.
func testAccCheckIBMContainerCluster_without_workers_worker_num(clusterName string) string {
return fmt.Sprintf(`
data "ibm_org" "org" {
org = "%s"
}
data "ibm_space" "space" {
org = "%s"
space = "%s"
}
data "ibm_account" "acc" {
org_guid = "${data.ibm_org.org.id}"
}
resource "ibm_container_cluster" "testacc_cluster" {
name = "%s"
datacenter = "%s"
account_guid = "${data.ibm_account.acc.id}"
machine_type = "%s"
isolation = "public"
public_vlan_id = "%s"
private_vlan_id = "%s"
no_subnet = true
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}

// testAccCheckIBMContainerCluster_with_worker_num_zero returns an HCL
// configuration with worker_num = 0, which the worker_num validator is
// expected to reject.
func testAccCheckIBMContainerCluster_with_worker_num_zero(clusterName string) string {
return fmt.Sprintf(`
data "ibm_org" "org" {
org = "%s"
}
data "ibm_space" "space" {
org = "%s"
space = "%s"
}
data "ibm_account" "acc" {
org_guid = "${data.ibm_org.org.id}"
}
resource "ibm_container_cluster" "testacc_cluster" {
name = "%s"
datacenter = "%s"
account_guid = "${data.ibm_account.acc.id}"
worker_num = 0
machine_type = "%s"
isolation = "public"
public_vlan_id = "%s"
private_vlan_id = "%s"
no_subnet = true
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}

func testAccCheckIBMContainerClusterOptionalOrgSpace_basic(clusterName string) string {
return fmt.Sprintf(`
Expand Down Expand Up @@ -552,3 +676,65 @@ resource "ibm_container_cluster" "testacc_cluster" {
tags = ["test","once"]
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}

// testAccCheckIBMContainerCluster_worker_count returns an HCL configuration
// that creates the cluster with worker_num = 1 (first step of the
// worker-count acceptance test).
func testAccCheckIBMContainerCluster_worker_count(clusterName string) string {
return fmt.Sprintf(`
data "ibm_org" "org" {
org = "%s"
}
data "ibm_space" "space" {
org = "%s"
space = "%s"
}
data "ibm_account" "acc" {
org_guid = "${data.ibm_org.org.id}"
}
resource "ibm_container_cluster" "testacc_cluster" {
name = "%s"
datacenter = "%s"
account_guid = "${data.ibm_account.acc.id}"
worker_num = 1
machine_type = "%s"
isolation = "public"
public_vlan_id = "%s"
private_vlan_id = "%s"
no_subnet = true
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}

// testAccCheckIBMContainerCluster_worker_count_update returns an HCL
// configuration identical to testAccCheckIBMContainerCluster_worker_count
// except worker_num = 2, used to exercise the scale-up path in Update.
func testAccCheckIBMContainerCluster_worker_count_update(clusterName string) string {
return fmt.Sprintf(`
data "ibm_org" "org" {
org = "%s"
}
data "ibm_space" "space" {
org = "%s"
space = "%s"
}
data "ibm_account" "acc" {
org_guid = "${data.ibm_org.org.id}"
}
resource "ibm_container_cluster" "testacc_cluster" {
name = "%s"
datacenter = "%s"
account_guid = "${data.ibm_account.acc.id}"
worker_num = 2
machine_type = "%s"
isolation = "public"
public_vlan_id = "%s"
private_vlan_id = "%s"
no_subnet = true
} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID)
}
10 changes: 10 additions & 0 deletions ibm/validators.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,16 @@ func validateAppInstance(v interface{}, k string) (ws []string, errors []error)

}

func validateWorkerNum(v interface{}, k string) (ws []string, errors []error) {
workerNum := v.(int)
if workerNum <= 0 {
errors = append(errors, fmt.Errorf(
"%q must be greater than 0", k))
}
return

}

func validateAppZipPath(v interface{}, k string) (ws []string, errors []error) {
path := v.(string)
applicationZip, err := homedir.Expand(path)
Expand Down
Loading

0 comments on commit 80ec9cc

Please sign in to comment.