From 80ec9cc25601584bcc69c0b9104b2b69002ab478 Mon Sep 17 00:00:00 2001 From: "Praveen K. Gostu" Date: Mon, 30 Apr 2018 12:32:12 +0530 Subject: [PATCH] Create workers for a cluster based on the count Use attribute worker_num which is used to create the workers based on count worker_num conflicts with workers attribute --- ibm/resource_ibm_container_cluster.go | 95 ++++++++- ibm/resource_ibm_container_cluster_test.go | 186 ++++++++++++++++++ ibm/validators.go | 10 + .../docs/r/container_cluster.html.markdown | 31 ++- 4 files changed, 311 insertions(+), 11 deletions(-) diff --git a/ibm/resource_ibm_container_cluster.go b/ibm/resource_ibm_container_cluster.go index 22ca05a332..acb790c5d8 100644 --- a/ibm/resource_ibm_container_cluster.go +++ b/ibm/resource_ibm_container_cluster.go @@ -50,8 +50,9 @@ func resourceIBMContainerCluster() *schema.Resource { Description: "The datacenter where this cluster will be deployed", }, "workers": { - Type: schema.TypeList, - Required: true, + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"worker_num"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -77,6 +78,22 @@ func resourceIBMContainerCluster() *schema.Resource { }, }, + "worker_num": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: "Number of worker nodes", + ConflictsWith: []string{"workers"}, + ValidateFunc: validateWorkerNum, + }, + + "workers_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: "The IDs of the worker node", + }, + "disk_encryption": { Type: schema.TypeBool, Optional: true, @@ -140,10 +157,7 @@ func resourceIBMContainerCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "worker_num": { - Type: schema.TypeInt, - Computed: true, - }, + "subnet_id": { Type: schema.TypeSet, Optional: true, @@ -212,7 +226,6 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) name := 
d.Get("name").(string) datacenter := d.Get("datacenter").(string) - workers := d.Get("workers").([]interface{}) billing := d.Get("billing").(string) machineType := d.Get("machine_type").(string) publicVlanID := d.Get("public_vlan_id").(string) @@ -221,11 +234,26 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) noSubnet := d.Get("no_subnet").(bool) isolation := d.Get("isolation").(string) diskEncryption := d.Get("disk_encryption").(bool) + var workers []interface{} + var workerNum int + if v, ok := d.GetOk("workers"); ok { + workers = v.([]interface{}) + workerNum = len(workers) + } + + if v, ok := d.GetOk("worker_num"); ok { + workerNum = v.(int) + } + + if workerNum == 0 { + return fmt.Errorf( + "Please set either the wokers with valid array or worker_num with value greater than 0") + } params := v1.ClusterCreateRequest{ Name: name, Datacenter: datacenter, - WorkerNum: len(workers), + WorkerNum: workerNum, Billing: billing, MachineType: machineType, PublicVlan: publicVlanID, @@ -333,6 +361,7 @@ func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) e if err != nil { return err } + wrkAPI := csClient.Workers() targetEnv := getClusterTargetHeader(d) @@ -342,12 +371,22 @@ func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error retrieving armada cluster: %s", err) } + workerFields, err := wrkAPI.List(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workers := make([]string, len(workerFields)) + for i, worker := range workerFields { + workers[i] = worker.ID + } + d.Set("name", cls.Name) d.Set("server_url", cls.ServerURL) d.Set("ingress_hostname", cls.IngressHostname) d.Set("ingress_secret", cls.IngressSecretName) d.Set("worker_num", cls.WorkerCount) d.Set("subnet_id", d.Get("subnet_id").(*schema.Set)) + d.Set("workers_info", workers) d.Set("kube_version", strings.Split(cls.MasterKubeVersion, 
"_")[0]) return nil } @@ -389,6 +428,46 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{}) } workersInfo := []map[string]string{} + if d.HasChange("worker_num") { + old, new := d.GetChange("worker_num") + oldCount := old.(int) + newCount := new.(int) + if newCount > oldCount { + count := newCount - oldCount + machineType := d.Get("machine_type").(string) + publicVlanID := d.Get("public_vlan_id").(string) + privateVlanID := d.Get("private_vlan_id").(string) + isolation := d.Get("isolation").(string) + params := v1.WorkerParam{ + WorkerNum: count, + MachineType: machineType, + PublicVlan: publicVlanID, + PrivateVlan: privateVlanID, + Isolation: isolation, + } + wrkAPI.Add(clusterID, params, targetEnv) + } else if oldCount > newCount { + count := oldCount - newCount + workerFields, err := wrkAPI.List(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + for i := 0; i < count; i++ { + err := wrkAPI.Delete(clusterID, workerFields[i].ID, targetEnv) + if err != nil { + return fmt.Errorf( + "Error deleting workers of cluster (%s): %s", d.Id(), err) + } + } + } + + _, err = WaitForWorkerAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err) + } + + } if d.HasChange("workers") { oldWorkers, newWorkers := d.GetChange("workers") oldWorker := oldWorkers.([]interface{}) diff --git a/ibm/resource_ibm_container_cluster_test.go b/ibm/resource_ibm_container_cluster_test.go index 75bee8e7a3..9e9cad772a 100644 --- a/ibm/resource_ibm_container_cluster_test.go +++ b/ibm/resource_ibm_container_cluster_test.go @@ -3,6 +3,7 @@ package ibm import ( "fmt" "log" + "regexp" "strings" "testing" @@ -58,6 +59,69 @@ func TestAccIBMContainerCluster_basic(t *testing.T) { }) } +func TestAccIBMContainerCluster_worker_count(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMContainerClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerCluster_worker_count(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "name", clusterName), + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "worker_num", "1"), + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "workers_info.#", "1"), + ), + }, + { + Config: testAccCheckIBMContainerCluster_worker_count_update(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "name", clusterName), + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "worker_num", "2"), + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "workers_info.#", "2"), + ), + }, + }, + }) +} + +func TestAccIBMContainerCluster_without_workers_worker_num(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMContainerCluster_without_workers_worker_num(clusterName), + ExpectError: regexp.MustCompile(" Please set either the wokers with valid array"), + }, + }, + }) +} + +func TestAccIBMContainerCluster_with_worker_num_zero(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccCheckIBMContainerCluster_with_worker_num_zero(clusterName), + ExpectError: regexp.MustCompile("must be greater than 0"), + }, + }, + }) +} + func TestAccIBMContainerCluster_diskEnc(t *testing.T) { clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ @@ -296,6 +360,66 @@ resource "ibm_container_cluster" "testacc_cluster" { } `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) } +func testAccCheckIBMContainerCluster_without_workers_worker_num(clusterName string) string { + return fmt.Sprintf(` + +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + + account_guid = "${data.ibm_account.acc.id}" + + machine_type = "%s" + isolation = "public" + public_vlan_id = "%s" + private_vlan_id = "%s" + no_subnet = true +} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) +} + +func testAccCheckIBMContainerCluster_with_worker_num_zero(clusterName string) string { + return fmt.Sprintf(` + +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + + account_guid = "${data.ibm_account.acc.id}" + worker_num = 0 + machine_type = "%s" + isolation = "public" + public_vlan_id = "%s" + private_vlan_id = "%s" + no_subnet = true +} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) +} + func testAccCheckIBMContainerClusterOptionalOrgSpace_basic(clusterName string) string { return fmt.Sprintf(` @@ -552,3 +676,65 @@ resource "ibm_container_cluster" 
"testacc_cluster" { tags = ["test","once"] } `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) } + +func testAccCheckIBMContainerCluster_worker_count(clusterName string) string { + return fmt.Sprintf(` + +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + + account_guid = "${data.ibm_account.acc.id}" + + worker_num = 1 + + machine_type = "%s" + isolation = "public" + public_vlan_id = "%s" + private_vlan_id = "%s" + no_subnet = true +} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) +} + +func testAccCheckIBMContainerCluster_worker_count_update(clusterName string) string { + return fmt.Sprintf(` + +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "%s" + + account_guid = "${data.ibm_account.acc.id}" + worker_num = 2 + machine_type = "%s" + isolation = "public" + public_vlan_id = "%s" + private_vlan_id = "%s" + no_subnet = true +} `, cfOrganization, cfOrganization, cfSpace, clusterName, datacenter, machineType, publicVlanID, privateVlanID) +} diff --git a/ibm/validators.go b/ibm/validators.go index b8a3109dbd..c8de150c52 100644 --- a/ibm/validators.go +++ b/ibm/validators.go @@ -116,6 +116,16 @@ func validateAppInstance(v interface{}, k string) (ws []string, errors []error) } +func validateWorkerNum(v interface{}, k string) (ws []string, errors []error) { + workerNum := v.(int) + if workerNum <= 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than 0", k)) + } + return + +} + func validateAppZipPath(v interface{}, k 
string) (ws []string, errors []error) { path := v.(string) applicationZip, err := homedir.Expand(path) diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index 77e3f9d2d4..870460ecee 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -45,6 +45,29 @@ resource "ibm_container_cluster" "testacc_cluster" { } ``` +Create the Kubernetes cluster using worker_num: + +```hcl +resource "ibm_container_cluster" "testacc_cluster" { + name = "test" + datacenter = "dal10" + machine_type = "free" + isolation = "public" + public_vlan_id = "vlan" + private_vlan_id = "vlan" + subnet_id = ["1154643"] + + worker_num = 2 + webhook = [{ + level = "Normal" + type = "slack" + url = "https://hooks.slack.com/services/yt7rebjhgh2r4rd44fjk" + }] + + account_guid = "test_acc" +} +``` + ## Argument Reference The following arguments are supported: @@ -55,10 +78,13 @@ The following arguments are supported: * `org_guid` - (Optional, string) The GUID for the IBM Cloud organization associated with the cluster. You can retrieve the value from data source `ibm_org` or by running the `bx iam orgs --guid` command in the IBM Cloud CLI. * `space_guid` - (Optional, string) The GUID for the IBM Cloud space associated with the cluster. You can retrieve the value from data source `ibm_space` or by running the `bx iam space --guid` command in the IBM Cloud CLI. * `account_guid` - (Required, string) The GUID for the IBM Cloud account associated with the cluster. You can retrieve the value from data source `ibm_account` or by running the `bx iam accounts` command in the IBM Cloud CLI. -* `workers` - (Required, array) The worker nodes that you want to add to the cluster. Nested `workers` blocks have the following structure: +* `workers` - (Optional, array) The worker nodes that you want to add to the cluster. 
Nested `workers` blocks have the following structure: * `action` - valid actions are add, reboot and reload. * `name` - Name of the worker. * `version` - worker version. + **NOTE**: Conflicts with `worker_num`. +* `worker_num` - (Optional, int) The number of cluster worker nodes. + **NOTE**: Conflicts with `workers`. * `machinetype` - (Optional, string) The machine type of the worker nodes. You can retrieve the value by running the `bx cs machine-types ` command in the IBM Cloud CLI. * `billing` - (Optional, string) The billing type for the instance. Accepted values are `hourly` or `monthly`. * `isolation` - (Optional, string) Accepted values are `public` or `private`. Use `private` if you want to have available physical resources dedicated to you only or `public` to allow physical resources to be shared with other IBM customers. @@ -81,8 +107,7 @@ The following attributes are exported: * `server_url` - The server URL. * `ingress_hostname` - The Ingress hostname. * `ingress_secret` - The Ingress secret. -* `worker_num` - The number of worker nodes for this cluster. -* `workers` - The worker nodes attached to this cluster. +* `workers_info` - The worker nodes attached to this cluster. * `subnet_id` - The subnets attached to this cluster. * `workers` - Exported attributes are: * `id` - The id of the worker \ No newline at end of file