diff --git a/docs/data-sources/nks_cluster.md b/docs/data-sources/nks_cluster.md index ec7f2c209..765ca3950 100644 --- a/docs/data-sources/nks_cluster.md +++ b/docs/data-sources/nks_cluster.md @@ -27,13 +27,15 @@ In addition to all arguments above, the following attributes are exported: * `id` - Cluster uuid. * `endpoint` - Control Plane API address. * `lb_private_subnet_no` - Subnet No. for private loadbalancer only. -* `lb_public_subnet_no` - Subnet No. for public loadbalancer only. (Available only `SGN`, `JPN` region) +* `lb_public_subnet_no` - Subnet No. for public loadbalancer only. (Supported on `public`, `gov` site) * `subnet_no_list` - Subnet No. list. * `public_network` - Public Subnet Network * `kube_network_plugin` - Kubernetes network plugin. +* `hypervisor_code` - Hypervisor code. * `cluster_type` - Cluster type. `Maximum number of nodes` - * 10 ea : `SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002` - * 50 ea : `SVR.VNKS.STAND.C004.M016.NET.SSD.B050.G002` + * `XEN` / `RHV` + * 10 ea : `SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002` + * 50 ea : `SVR.VNKS.STAND.C004.M016.NET.SSD.B050.G002` * `login_key_name` - Login key name. * `zone` - zone Code. * `vpc_no` - VPC No. @@ -49,8 +51,8 @@ In addition to all arguments above, the following attributes are exported: * `groups_prefix` - Groups prefix. * `groups_claim` - Groups claim. * `required_claim` - Required claim. -* `ip_acl_default_action` - IP ACL default action.(Available only `public`, `gov` site) -* `ip_acl` (Available only `public`, `gov` site) +* `ip_acl_default_action` - IP ACL default action. 
(Supported on `public`, `gov` site) +* `ip_acl` (Supported on `public`, `gov` site) * `action` - `allow`, `deny` * `address` - CIDR * `comment` - Comment \ No newline at end of file diff --git a/docs/data-sources/nks_node_pool.md b/docs/data-sources/nks_node_pool.md index e0d1794b2..8611f86f0 100644 --- a/docs/data-sources/nks_node_pool.md +++ b/docs/data-sources/nks_node_pool.md @@ -48,4 +48,11 @@ In addition to all arguments above, the following attributes are exported: * `node_status` - Node Status. * `container_version` - Container version of node. * `kernel_version` - kernel version of node. -* `k8s_version` - Kubenretes version . \ No newline at end of file +* `k8s_version` - Kubenretes version. +* `label` - NodePool label. + * `key` - Label key. + * `value` - Label value. +* `taint` - NodePool taint. + * `key` - Taint key. + * `value` - Taint value. + * `effect` - Taint effect. \ No newline at end of file diff --git a/docs/data-sources/nks_server_images.md b/docs/data-sources/nks_server_images.md index 24873b35b..6dc459a3c 100644 --- a/docs/data-sources/nks_server_images.md +++ b/docs/data-sources/nks_server_images.md @@ -10,7 +10,7 @@ data "ncloud_nks_server_images" "images" {} data "ncloud_nks_server_images" "ubuntu20" { filter { name = "label" - values = ["ubuntu-20.04-64-server"] + values = ["ubuntu-20.04"] regex = true } } diff --git a/docs/data-sources/nks_server_products.md b/docs/data-sources/nks_server_products.md index 48613934b..b1268220d 100644 --- a/docs/data-sources/nks_server_products.md +++ b/docs/data-sources/nks_server_products.md @@ -11,7 +11,8 @@ data "ncloud_nks_server_products" "products" {} data "ncloud_nks_server_images" "images"{ filter { name = "label" - values = ["ubuntu-20.04-64-server"] + values = ["ubuntu-20.04"] + regex = true } } @@ -26,6 +27,27 @@ data "ncloud_nks_server_products" "product" { } } +data "ncloud_nks_server_products" "product_detail_filter" { + + software_code = 
data.ncloud_nks_server_images.images.images[0].value + zone = "KR-1" + + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = [ "8GB" ] + } +} + ``` ## Argument Reference @@ -45,12 +67,11 @@ The following arguments are supported: * `products` - A list of ServerProduct * `label` - ServerProduct spec korean description * `value` - ServerProduct code - * `detail` - * `cpu_count` - Number of cpu - * `gpu_count` - Number of gpu - * `gpu_memory_size` - Size of GPU memory(GB) - * `memory_size` - Size of memory(GB) - * `product_code` - ServerProduct code - * `product_english_desc` - ServerProduct spec english description - * `product_korean_desc` - ServerProduct spec korean description - * `product_type` - ServerProduct Type \ No newline at end of file + * `cpu_count` - Number of cpu + * `gpu_count` - Number of gpu + * `gpu_memory_size` - Size of GPU memory(GB) + * `memory_size` - Size of memory(GB) + * `product_code` - ServerProduct code + * `product_english_desc` - ServerProduct spec english description + * `product_korean_desc` - ServerProduct spec korean description + * `product_type` - ServerProduct Type \ No newline at end of file diff --git a/docs/resources/nks_cluster.md b/docs/resources/nks_cluster.md index 9d6496055..e80f96c4f 100644 --- a/docs/resources/nks_cluster.md +++ b/docs/resources/nks_cluster.md @@ -1,6 +1,6 @@ # Resource: ncloud_nks_cluster -Provides a Kubernetes Service cluster resource. +Provides a Kubernetes Service Cluster resource. 
## Example Usage @@ -25,7 +25,7 @@ resource "ncloud_subnet" "subnet_lb" { subnet = "10.0.100.0/24" zone = "KR-1" network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PUBLIC" + subnet_type = "PRIVATE" name = "subnet-lb" usage_type = "LOADB" } @@ -34,7 +34,7 @@ resource "ncloud_subnet" "subnet_lb" { data "ncloud_nks_versions" "version" { filter { name = "value" - values = ["1.20"] + values = ["1.25"] regex = true } } @@ -67,16 +67,19 @@ resource "ncloud_nks_cluster" "cluster" { The following arguments are supported: * `name` - (Required) Cluster name. +* `hypervisor_code` - (Optional) Hypervisor code. `XEN`(Default), `RHV` * `cluster_type` -(Required) Cluster type. `Maximum number of nodes` - * 10 ea : `SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002` - * 50 ea : `SVR.VNKS.STAND.C004.M016.NET.SSD.B050.G002` + * `XEN` / `RHV` + * 10 ea : `SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002` + * 50 ea : `SVR.VNKS.STAND.C004.M016.NET.SSD.B050.G002` * `login_key_name` - (Required) Login key name. * `zone` - (Required) zone Code. * `vpc_no` - (Required) VPC No. * `subnet_no_list` - (Required) Subnet No. list. * `public_network` - (Optional) Public Subnet Network (`boolean`) * `lb_private_subnet_no` - (Required) Subnet No. for private loadbalancer only. -* `lb_public_subnet_no` - (Optional) Subnet No. for public loadbalancer only. (Available only `SGN`, `JPN` region) +* `lb_public_subnet_no` - (Optional) Subnet No. for public loadbalancer only. (Required in `KR`, `SG`, `JP` regions in public site) +* `kube_network_plugin` - (Optional) Specifies the network plugin. Only Cilium is supported. * `log` - (Optional) * `audit` - (Required) Audit log availability. (`boolean`) * `k8s_version` - (Optional) Kubenretes version. Only upgrade is supported. @@ -88,11 +91,12 @@ The following arguments are supported: * `groups_prefix` - (Optional) Groups prefix. * `groups_claim` - (Optional) Groups claim. * `required_claim` - (Optional) Required claim. 
-* `ip_acl_default_action` - (Optional) IP ACL default action.`allow`(default), `deny` -* `ip_acl` (Optional) +* `ip_acl_default_action` - (Optional) IP ACL default action. `allow`, `deny` (Supported on `public`, `gov` site) +* `ip_acl` (Optional) (Supported on `public`, `gov` site) * `action` - (Required) `allow`, `deny` * `address` - (Required) CIDR * `comment` - (Optional) Comment + ## Attributes Reference In addition to all arguments above, the following attributes are exported: @@ -107,4 +111,3 @@ In addition to all arguments above, the following attributes are exported: Kubernetes Service Cluster can be imported using the name, e.g., $ terraform import ncloud_nks_cluster.my_cluster uuid - diff --git a/docs/resources/nks_node_pool.md b/docs/resources/nks_node_pool.md index 468cc29d3..bc26fbbfc 100644 --- a/docs/resources/nks_node_pool.md +++ b/docs/resources/nks_node_pool.md @@ -33,7 +33,7 @@ resource "ncloud_subnet" "subnet_lb" { data "ncloud_nks_versions" "version" { filter { name = "value" - values = ["1.23"] + values = ["1.25"] regex = true } } @@ -55,43 +55,41 @@ resource "ncloud_nks_cluster" "cluster" { } -data "ncloud_server_image" "image" { +data "ncloud_nks_server_images" "image"{ + hypervisor_code = "XEN" filter { - name = "product_name" + name = "label" values = ["ubuntu-20.04"] + regex = true } } -data "ncloud_server_product" "product" { - server_image_product_code = data.ncloud_server_image.image.product_code +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "KR-1" filter { name = "product_type" - values = [ "STAND" ] + values = [ "STAND"] } - + filter { name = "cpu_count" - values = [ 2 ] + values = [ "2"] } - + filter { name = "memory_size" values = [ "8GB" ] } - - filter { - name = "product_code" - values = [ "SSD" ] - regex = true - } } resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = ncloud_nks_cluster.cluster.uuid node_pool_name = "sample-node-pool" 
node_count = 1 - product_code = data.ncloud_server_product_code.product.product_code + software_code = data.ncloud_nks_server_images.image.images[0].value + product_code = data.ncloud_nks_server_products.product[0].value subnet_no = ncloud_subnet.subnet.id autoscale { enabled = true @@ -108,21 +106,27 @@ The following arguments are supported: * `node_pool_name` - (Required) Nodepool name. * `cluster_uuid` - (Required) Cluster uuid. * `node_count` - (Required) Number of nodes. -* `product_code` - (Required) Product code. +* `product_code` - (Optional) Product code. Required for `XEN`/`RHV` cluster nodepool. * `software_code` - (Optional) Server image code. * `autoscale`- (Optional) * `enable` - (Required) Auto scaling availability. * `max` - (Required) Maximum number of nodes available for auto scaling. * `min` - (Required) Minimum number of nodes available for auto scaling. * `subnet_no` - (Deprecated) Subnet No. -* `subnet_no_list` - Subnet no list. +* `subnet_no_list` - (Optional) Subnet no list. * `k8s_version` - (Optional) Kubenretes version. Only upgrade is supported. - +* `label` - (Optional) NodePool label. + * `key` - (Required) Label key. + * `value` - (Required) Label value. +* `taint` - (Optional) NodePool taint. + * `key` - (Required) Taint key. + * `value` - (Required) Taint value. + * `effect` - (Required) Taint effect. ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* `id` - The ID of nodepool.`CusterUuid:NodePoolName` +* `id` - The ID of nodepool.`cluster_uuid:node_pool_name` * `instance_no` - Instance No. * `nodes`- Running nodes in nodepool. * `name` - The name of Server instance. 
@@ -138,4 +142,3 @@ In addition to all arguments above, the following attributes are exported: NKS Node Pools can be imported using the cluster_name and node_pool_name separated by a colon (:), e.g., $ terraform import ncloud_nks_node_pool.my_node_pool uuid:my_node_pool - diff --git a/examples/nks/main.tf b/examples/nks/main.tf index 3eb227a12..143d1decf 100644 --- a/examples/nks/main.tf +++ b/examples/nks/main.tf @@ -60,50 +60,54 @@ resource "ncloud_nks_cluster" "cluster" { } } -data "ncloud_nks_server_images" "images"{ +data "ncloud_nks_server_images" "image"{ + hypervisor_code = "XEN" filter { name = "label" - values = ["ubuntu-20.04-64-server"] + values = ["ubuntu-20.04"] + regex = true } } -data "ncloud_nks_server_products" "products" { - - software_code = data.ncloud_nks_server_images.images.images[0].value +data "ncloud_nks_server_products" "nks_products"{ + software_code = data.ncloud_nks_server_images.image.images[0].value zone = "KR-1" filter { name = "product_type" - values = [ "STAND" ] + values = [ "STAND"] } filter { name = "cpu_count" - values = [ 2 ] + values = [ "2"] } filter { name = "memory_size" values = [ "8GB" ] } - - filter { - name = "product_code" - values = [ "SSD" ] - regex = true - } } resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = ncloud_nks_cluster.cluster.uuid node_pool_name = "pool1" node_count = 1 - product_code = data.ncloud_nks_server_products.products.products[0].value - software_code = data.ncloud_nks_server_images.images.images[0].value - subnet_no = ncloud_subnet.node_subnet.id + software_code = data.ncloud_nks_server_images.image.images[0].value + product_code = data.ncloud_nks_server_products.nks_products.products[0].value + subnet_no_list = [ncloud_subnet.node_subnet.id] autoscale { enabled = true min = 1 max = 2 } + label { + key = "foo" + value = "bar" + } + taint { + key = "foo" + value = "bar" + effect = "NoExecute" + } } diff --git a/examples/nks/variables.tf b/examples/nks/variables.tf index 
39f59c993..c8cbffd63 100644 --- a/examples/nks/variables.tf +++ b/examples/nks/variables.tf @@ -3,7 +3,7 @@ variable name { } variable nks_version { - default = "1.23" + default = "1.25" } variable client_ip { diff --git a/go.mod b/go.mod index 7a662070b..5f69512b0 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-ncloud go 1.19 require ( - github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4 + github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.7 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 gopkg.in/yaml.v3 v3.0.1 @@ -47,10 +47,10 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.6 // indirect google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect google.golang.org/grpc v1.50.1 // indirect diff --git a/go.sum b/go.sum index 125072438..548d6c424 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4 h1:/PXqrDFlF3U/jjaGkrEpXYt6EKalENZjNaHaoHL6718= -github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4/go.mod 
h1:sDa6EITv6z/l6+d4VJk4OiRZnXuO0uG2Cm30qtqF4TU= +github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.7 h1:lJWWQeSAG0BUtKVVL6nb0n8/CBbKOVT32WD4oFP5bRY= +github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.7/go.mod h1:jRp8KZ64MUevBWNqehghhG2oF5/JU3Dmt/Cu7dp1mQE= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= @@ -194,13 +194,14 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= -golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -216,8 +217,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -225,6 +228,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -243,19 +247,25 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -263,6 +273,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/ncloud/data_source_ncloud_nks_cluster.go b/ncloud/data_source_ncloud_nks_cluster.go index 61e654067..3b6b67456 100644 --- a/ncloud/data_source_ncloud_nks_cluster.go +++ 
b/ncloud/data_source_ncloud_nks_cluster.go @@ -29,6 +29,11 @@ func dataSourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "hypervisor_code": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "endpoint": { Type: schema.TypeString, Computed: true, @@ -114,7 +119,7 @@ func dataSourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "groups_cliam": { + "groups_claim": { Type: schema.TypeString, Optional: true, }, @@ -186,6 +191,7 @@ func dataSourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, d.Set("uuid", cluster.Uuid) d.Set("name", cluster.Name) d.Set("cluster_type", cluster.ClusterType) + d.Set("hypervisor_code", cluster.HypervisorCode) d.Set("endpoint", cluster.Endpoint) d.Set("login_key_name", cluster.LoginKeyName) d.Set("k8s_version", cluster.K8sVersion) diff --git a/ncloud/data_source_ncloud_nks_cluster_test.go b/ncloud/data_source_ncloud_nks_cluster_test.go index aa0509138..83a02bc45 100644 --- a/ncloud/data_source_ncloud_nks_cluster_test.go +++ b/ncloud/data_source_ncloud_nks_cluster_test.go @@ -1,6 +1,7 @@ package ncloud import ( + "bytes" "fmt" "testing" @@ -9,17 +10,22 @@ import ( ) func TestAccDataSourceNcloudNKSCluster(t *testing.T) { + validateAcctestEnvironment(t) + dataName := "data.ncloud_nks_cluster.cluster" resourceName := "ncloud_nks_cluster.cluster" - testClusterName := getTestClusterName() - region, clusterType, _, k8sVersion := getRegionAndNKSType() + name := getTestClusterName() + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceNKSClusterConfig(testClusterName, clusterType, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), + Config: testAccDataSourceNKSClusterConfig(name, TF_TEST_NKS_LOGIN_KEY, true, nksInfo), Check: 
resource.ComposeTestCheckFunc( testAccCheckDataSourceID(dataName), resource.TestCheckResourceAttrPair(dataName, "id", resourceName, "id"), @@ -44,62 +50,50 @@ func TestAccDataSourceNcloudNKSCluster(t *testing.T) { }) } -func testAccDataSourceNKSClusterConfig(testClusterName string, clusterType string, loginKey string, version string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - +func testAccDataSourceNKSClusterConfig(name string, loginKeyName string, auditLog bool, nksInfo *NKSTestInfo) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[4]s" - login_key_name = "%[3]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" + log { + audit = %[10]t + } + oidc { + issuer_url = "https://keycloak.url/realms/nks" + client_id = "nks-client" + 
username_claim = "preferred_username" + username_prefix = "oidc:" + groups_claim = "groups" + groups_prefix = "oidc:" + required_claim = "iss=https://keycloak.url/realms/nks" + } +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region, auditLog)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` } +`) + b.WriteString(` data "ncloud_nks_cluster" "cluster" { uuid = ncloud_nks_cluster.cluster.uuid } - - -`, testClusterName, clusterType, loginKey, version, region) +`) + return b.String() } diff --git a/ncloud/data_source_ncloud_nks_kube_config_test.go b/ncloud/data_source_ncloud_nks_kube_config_test.go index 6ede4b765..d4057cb7a 100644 --- a/ncloud/data_source_ncloud_nks_kube_config_test.go +++ b/ncloud/data_source_ncloud_nks_kube_config_test.go @@ -1,6 +1,7 @@ package ncloud import ( + "bytes" "fmt" "testing" @@ -8,17 +9,22 @@ import ( ) func TestAccDataSourceNcloudNKSKubeConfig(t *testing.T) { + validateAcctestEnvironment(t) + dataName := "data.ncloud_nks_kube_config.kube_config" resourceName := "ncloud_nks_cluster.cluster" - testClusterName := getTestClusterName() - region, clusterType, _, k8sVersion := getRegionAndNKSType() + name := getTestClusterName() + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceNKSKubeConfigConfig(testClusterName, clusterType, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), + Config: testAccDataSourceNKSKubeConfigConfig(name, TF_TEST_NKS_LOGIN_KEY, true, nksInfo), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceID(dataName), resource.TestCheckResourceAttrPair(dataName, 
"cluster_uuid", resourceName, "uuid"), @@ -29,61 +35,50 @@ func TestAccDataSourceNcloudNKSKubeConfig(t *testing.T) { }) } -func testAccDataSourceNKSKubeConfigConfig(testClusterName string, clusterType string, loginKey string, version string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - +func testAccDataSourceNKSKubeConfigConfig(name string, loginKeyName string, auditLog bool, nksInfo *NKSTestInfo) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[4]s" - login_key_name = "%[3]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" -} + vpc_no = %[8]s + zone = "%[9]s-1" + log { + audit = %[10]t + } + oidc { + issuer_url = "https://keycloak.url/realms/nks" + client_id = "nks-client" + username_claim = "preferred_username" + username_prefix = "oidc:" + groups_claim = "groups" + 
groups_prefix = "oidc:" + required_claim = "iss=https://keycloak.url/realms/nks" + } +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region, auditLog)) -data "ncloud_nks_kube_config" "kube_config" { - cluster_uuid = ncloud_nks_cluster.cluster.uuid -} + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + b.WriteString(` +} +`) -`, testClusterName, clusterType, loginKey, version, region) + b.WriteString(` + data "ncloud_nks_kube_config" "kube_config" { + cluster_uuid = ncloud_nks_cluster.cluster.uuid + } +`) + return b.String() } diff --git a/ncloud/data_source_ncloud_nks_node_pool.go b/ncloud/data_source_ncloud_nks_node_pool.go index 8ad91b74a..104d29638 100644 --- a/ncloud/data_source_ncloud_nks_node_pool.go +++ b/ncloud/data_source_ncloud_nks_node_pool.go @@ -55,6 +55,14 @@ func dataSourceNcloudNKSNodePool() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "storage_size": { + Type: schema.TypeString, + Computed: true, + }, + "server_spec_code": { + Type: schema.TypeString, + Computed: true, + }, "autoscale": { Type: schema.TypeList, Computed: true, @@ -75,6 +83,44 @@ func dataSourceNcloudNKSNodePool() *schema.Resource { }, }, }, + "label": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "taint": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Required: true, + }, + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { 
+ Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "nodes": { Type: schema.TypeList, Computed: true, @@ -148,6 +194,9 @@ func dataSourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData d.Set("software_code", nodePool.SoftwareCode) d.Set("node_count", nodePool.NodeCount) d.Set("k8s_version", nodePool.K8sVersion) + d.Set("server_spec_code", nodePool.ServerSpecCode) + d.Set("storage_size", strconv.Itoa(int(ncloud.Int32Value(nodePool.StorageSize)))) + if len(nodePool.SubnetNoList) > 0 { if err := d.Set("subnet_no_list", flattenInt32ListToStringList(nodePool.SubnetNoList)); err != nil { log.Printf("[WARN] Error setting subnet no list set for (%s): %s", d.Id(), err) @@ -158,6 +207,14 @@ func dataSourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData log.Printf("[WARN] Error setting Autoscale set for (%s): %s", d.Id(), err) } + if err := d.Set("taint", flattenNKSNodePoolTaints(nodePool.Taints)); err != nil { + log.Printf("[WARN] Error setting taints set for (%s): %s", d.Id(), err) + } + + if err := d.Set("label", flattenNKSNodePoolLabels(nodePool.Labels)); err != nil { + log.Printf("[WARN] Error setting labels set for (%s): %s", d.Id(), err) + } + nodes, err := getNKSNodePoolWorkerNodes(ctx, config, clusterUuid, nodePoolName) if err != nil { return diag.FromErr(err) diff --git a/ncloud/data_source_ncloud_nks_node_pool_test.go b/ncloud/data_source_ncloud_nks_node_pool_test.go index eb4279b1b..e569842db 100644 --- a/ncloud/data_source_ncloud_nks_node_pool_test.go +++ b/ncloud/data_source_ncloud_nks_node_pool_test.go @@ -1,6 +1,7 @@ package ncloud import ( + "bytes" "fmt" "testing" @@ -8,18 +9,21 @@ import ( ) func TestAccDataSourceNcloudNKSNodePool(t *testing.T) { + validateAcctestEnvironment(t) + dataName := "data.ncloud_nks_node_pool.node_pool" resourceName := "ncloud_nks_node_pool.node_pool" - testClusterName := getTestClusterName() - - region, clusterType, productType, k8sVersion := getRegionAndNKSType() - + clusterName := 
getTestClusterName() + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceNKSNodePoolConfig(testClusterName, clusterType, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region, productType), + Config: testAccDataSourceNKSNodePoolConfig(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 1), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceID(dataName), resource.TestCheckResourceAttrPair(dataName, "cluster_uuid", resourceName, "cluster_uuid"), @@ -46,56 +50,60 @@ func TestAccDataSourceNcloudNKSNodePool(t *testing.T) { }) } -func testAccDataSourceNKSNodePoolConfig(testClusterName string, clusterType string, loginKey string, version string, region string, productType string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - +func testAccDataSourceNKSNodePoolConfig(name string, loginKeyName string, nksInfo *NKSTestInfo, nodeCount int32) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[4]s" - login_key_name = "%[3]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" subnet_no_list = [ 
- ncloud_subnet.subnet1.id + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` } +`) -resource "ncloud_nks_node_pool" "node_pool" { - cluster_uuid = ncloud_nks_cluster.cluster.uuid - node_pool_name = "%[1]s" - node_count = 1 - product_code = "%[6]s" - subnet_no = ncloud_subnet.subnet1.id - autoscale { - enabled = true - min = 1 - max = 1 + b.WriteString(fmt.Sprintf(` +data "ncloud_nks_server_images" "image"{ + hypervisor_code = ncloud_nks_cluster.cluster.hypervisor_code + filter { + name = "label" + values = ["ubuntu-20.04"] + regex = true + } +} + +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "%[1]s-1" + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = [ "8GB" ] } } @@ -103,5 +111,45 @@ data "ncloud_nks_node_pool" "node_pool"{ cluster_uuid = ncloud_nks_node_pool.node_pool.cluster_uuid node_pool_name = ncloud_nks_node_pool.node_pool.node_pool_name } -`, testClusterName, clusterType, loginKey, version, region, productType) + +resource "ncloud_nks_node_pool" "node_pool" { + cluster_uuid = ncloud_nks_cluster.cluster.uuid + node_pool_name = "%[2]s" + node_count = %[3]d + k8s_version = "%[4]s" + subnet_no_list = [ %[5]s ] + autoscale { + enabled = false + max = 0 + min = 0 + } + + label { + key = "foo" + value = "bar" + } + + taint { + key = "foo" + effect = "NoSchedule" + value = "bar" + } + + software_code = data.ncloud_nks_server_images.image.images.0.value +`, 
nksInfo.Region, name, nodeCount, nksInfo.K8sVersion, *nksInfo.PrivateSubnetList[0].SubnetNo)) + if nksInfo.HypervisorCode == "KVM" { + b.WriteString(` + server_spec_code = data.ncloud_nks_server_products.product.products.0.value + storage_size = 100 +} + `) + + } else { + b.WriteString(` + product_code = data.ncloud_nks_server_products.product.products.0.value +} + `) + } + + return b.String() } diff --git a/ncloud/data_source_ncloud_nks_node_pools_test.go b/ncloud/data_source_ncloud_nks_node_pools_test.go index dc3939f33..5977f84b7 100644 --- a/ncloud/data_source_ncloud_nks_node_pools_test.go +++ b/ncloud/data_source_ncloud_nks_node_pools_test.go @@ -1,6 +1,7 @@ package ncloud import ( + "bytes" "fmt" "testing" @@ -8,17 +9,20 @@ import ( ) func TestAccDataSourceNcloudNKSNodePools(t *testing.T) { + validateAcctestEnvironment(t) - testClusterName := getTestClusterName() - - region, clusterType, productType, k8sVersion := getRegionAndNKSType() + clusterName := getTestClusterName() + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceNcloudNKSNodePoolsConfig(testClusterName, clusterType, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region, productType), + Config: testAccDataSourceNcloudNKSNodePoolsConfig(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 1), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceID("data.ncloud_nks_node_pools.all"), ), @@ -27,72 +31,107 @@ func TestAccDataSourceNcloudNKSNodePools(t *testing.T) { }) } -func testAccDataSourceNcloudNKSNodePoolsConfig(testClusterName string, clusterType string, loginKey string, version string, region string, productType string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} +func testAccDataSourceNcloudNKSNodePoolsConfig(name string, loginKeyName string, 
nksInfo *NKSTestInfo, nodeCount int32) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` +resource "ncloud_nks_cluster" "cluster" { + name = "%[1]s" + cluster_type = "%[2]s" + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" + subnet_no_list = [ + %[7]s + ] + vpc_no = %[8]s + zone = "%[9]s-1" +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` } +`) -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" + b.WriteString(fmt.Sprintf(` +data "ncloud_nks_server_images" "image"{ + hypervisor_code = ncloud_nks_cluster.cluster.hypervisor_code + filter { + name = "label" + values = ["ubuntu-20.04"] + regex = true + } } -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "%[1]s-1" + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = 
[ "8GB" ] + } } -resource "ncloud_nks_cluster" "cluster" { - name = "%[1]s" - cluster_type = "%[2]s" - k8s_version = "%[4]s" - login_key_name = "%[3]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id - subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, - ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" + +data "ncloud_nks_node_pools" "all" { + depends_on = [ncloud_nks_node_pool.node_pool] + cluster_uuid = ncloud_nks_cluster.cluster.uuid } resource "ncloud_nks_node_pool" "node_pool" { - cluster_uuid = ncloud_nks_cluster.cluster.uuid - node_pool_name = "%[1]s" - node_count = 1 - product_code = "%[6]s" - subnet_no = ncloud_subnet.subnet1.id + cluster_uuid = ncloud_nks_cluster.cluster.uuid + node_pool_name = "%[2]s" + node_count = %[3]d + k8s_version = "%[4]s" + subnet_no_list = [ %[5]s ] autoscale { - enabled = true - min = 1 - max = 2 + enabled = false + min = 0 + max = 0 + } + + label { + key = "foo" + value = "bar" + } + + taint { + key = "foo" + effect = "NoSchedule" + value = "bar" } + + software_code = data.ncloud_nks_server_images.image.images.0.value +`, nksInfo.Region, name, nodeCount, nksInfo.K8sVersion, *nksInfo.PrivateSubnetList[0].SubnetNo)) + if nksInfo.HypervisorCode == "KVM" { + b.WriteString(` + server_spec_code = data.ncloud_nks_server_products.product.products.0.value + storage_size = 100 } + `) -data "ncloud_nks_node_pools" "all" { - cluster_uuid = ncloud_nks_cluster.cluster.uuid + } else { + b.WriteString(` + product_code = data.ncloud_nks_server_products.product.products.0.value } -`, testClusterName, clusterType, loginKey, version, region, productType) + `) + } + + return b.String() } diff --git a/ncloud/data_source_ncloud_nks_server_images.go b/ncloud/data_source_ncloud_nks_server_images.go index 7524c879a..07ebcadbe 100644 --- a/ncloud/data_source_ncloud_nks_server_images.go +++ b/ncloud/data_source_ncloud_nks_server_images.go @@ -19,6 +19,10 @@ func dataSourceNcloudNKSServerImages() *schema.Resource { Schema: 
map[string]*schema.Schema{ "filter": dataSourceFiltersSchema(), + "hypervisor_code": { + Type: schema.TypeString, + Optional: true, + }, "images": { Type: schema.TypeList, Computed: true, @@ -45,7 +49,7 @@ func dataSourceNcloudNKSServerImagesRead(d *schema.ResourceData, meta interface{ return NotSupportClassic("datasource `ncloud_nks_node_pool_server_images`") } - resources, err := getNKSServerImages(config) + resources, err := getNKSServerImages(config, d) if err != nil { return err } @@ -63,10 +67,17 @@ func dataSourceNcloudNKSServerImagesRead(d *schema.ResourceData, meta interface{ } -func getNKSServerImages(config *ProviderConfig) ([]map[string]interface{}, error) { +func getNKSServerImages(config *ProviderConfig, d *schema.ResourceData) ([]map[string]interface{}, error) { logCommonRequest("GetNKSServerImages", "") - resp, err := config.Client.vnks.V2Api.OptionServerImageGet(context.Background()) + hypervisorCode := StringPtrOrNil(d.GetOk("hypervisor_code")) + + opt := make(map[string]interface{}) + if hypervisorCode != nil { + opt["hypervisorCode"] = hypervisorCode + } + + resp, err := config.Client.vnks.V2Api.OptionServerImageGet(context.Background(), opt) if err != nil { logErrorResponse("GetNKSServerImages", err, "") diff --git a/ncloud/data_source_ncloud_nks_server_products.go b/ncloud/data_source_ncloud_nks_server_products.go index 81971b5c7..0e61f410e 100644 --- a/ncloud/data_source_ncloud_nks_server_products.go +++ b/ncloud/data_source_ncloud_nks_server_products.go @@ -31,44 +31,85 @@ func dataSourceNcloudNKSServerProducts() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "detail": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, + "cpu_count": { + Type: schema.TypeString, Computed: true, + }, + "memory_size": { + Type: schema.TypeString, + Computed: true, + }, + "gpu_count": { + Type: schema.TypeString, + Computed: true, + }, + "gpu_memory_size": { + Type: schema.TypeString, + Computed: true, + }, + "product_type": { + Type: 
schema.TypeString, + Computed: true, + }, + "product_code": { + Type: schema.TypeString, + Computed: true, + }, + "product_korean_desc": { + Type: schema.TypeString, + Computed: true, + }, + "product_english_desc": { + Type: schema.TypeString, + Computed: true, + }, + "detail": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Deprecated: "change in location of arguemnts", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cpu_count": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "memory_size": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "gpu_count": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "gpu_memory_size": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "product_type": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "product_code": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "product_korean_desc": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, "product_english_desc": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "change in location of arguemnts", }, }, }, @@ -133,8 +174,16 @@ func getNKSServerProducts(config *ProviderConfig, d *schema.ResourceData) ([]map for _, r := range *resp { instance := map[string]interface{}{ - "label": ncloud.StringValue(r.Detail.ProductName), - "value": 
ncloud.StringValue(r.Detail.ProductCode), + "label": ncloud.StringValue(r.Label), + "value": ncloud.StringValue(r.Value), + "product_type": ncloud.StringValue(r.Detail.ProductType2Code), + "product_code": ncloud.StringValue(r.Detail.ProductCode), + "product_korean_desc": ncloud.StringValue(r.Detail.ProductKoreanDesc), + "product_english_desc": ncloud.StringValue(r.Detail.ProductEnglishDesc), + "cpu_count": strconv.Itoa(int(ncloud.Int32Value(r.Detail.CpuCount))), + "memory_size": strconv.Itoa(int(ncloud.Int32Value(r.Detail.MemorySizeGb))) + "GB", + "gpu_count": strconv.Itoa(int(ncloud.Int32Value(r.Detail.GpuCount))), + "gpu_memory_size": strconv.Itoa(int(ncloud.Int32Value(r.Detail.GpuMemorySizeGb))) + "GB", "detail": []map[string]interface{}{ { "product_type": ncloud.StringValue(r.Detail.ProductType2Code), diff --git a/ncloud/nks_list.go b/ncloud/nks_list.go new file mode 100644 index 000000000..8d4ef4375 --- /dev/null +++ b/ncloud/nks_list.go @@ -0,0 +1,279 @@ +package ncloud + +import ( + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func flattenInt32ListToStringList(list []*int32) []*string { + res := make([]*string, 0) + for _, v := range list { + res = append(res, ncloud.IntString(int(ncloud.Int32Value(v)))) + } + return res +} + +func flattenNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logInput T) []map[string]interface{} { + if logInput == nil { + return nil + } + + var audit bool + switch v := any(logInput).(type) { + case *vnks.ClusterLogInput: + audit = ncloud.BoolValue(v.Audit) + case *vnks.AuditLogDto: + audit = ncloud.BoolValue(v.Audit) + default: + return nil + } + + return []map[string]interface{}{ + { + "audit": audit, + }, + } +} +func expandNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logList []interface{}, returnType T) T { + if len(logList) == 0 { + return nil + } + 
log := logList[0].(map[string]interface{}) + switch any(returnType).(type) { + case *vnks.ClusterLogInput: + return T(&vnks.ClusterLogInput{ + Audit: ncloud.Bool(log["audit"].(bool)), + }) + case *vnks.AuditLogDto: + return T(&vnks.AuditLogDto{ + Audit: ncloud.Bool(log["audit"].(bool)), + }) + default: + return nil + } + +} + +func flattenNKSClusterOIDCSpec(oidcSpec *vnks.OidcRes) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if oidcSpec == nil || !*oidcSpec.Status { + return res + } + + res = []map[string]interface{}{ + { + "issuer_url": ncloud.StringValue(oidcSpec.IssuerURL), + "client_id": ncloud.StringValue(oidcSpec.ClientId), + "username_claim": ncloud.StringValue(oidcSpec.UsernameClaim), + "username_prefix": ncloud.StringValue(oidcSpec.UsernamePrefix), + "groups_claim": ncloud.StringValue(oidcSpec.GroupsClaim), + "groups_prefix": ncloud.StringValue(oidcSpec.GroupsPrefix), + "required_claim": ncloud.StringValue(oidcSpec.RequiredClaim), + }, + } + return res +} + +func expandNKSClusterOIDCSpec(oidc []interface{}) *vnks.UpdateOidcDto { + res := &vnks.UpdateOidcDto{Status: ncloud.Bool(false)} + if len(oidc) == 0 { + return res + } + + oidcSpec := oidc[0].(map[string]interface{}) + if oidcSpec["issuer_url"].(string) != "" && oidcSpec["client_id"].(string) != "" { + res.Status = ncloud.Bool(true) + res.IssuerURL = ncloud.String(oidcSpec["issuer_url"].(string)) + res.ClientId = ncloud.String(oidcSpec["client_id"].(string)) + + usernameClaim, ok := oidcSpec["username_claim"] + if ok { + res.UsernameClaim = ncloud.String(usernameClaim.(string)) + } + usernamePrefix, ok := oidcSpec["username_prefix"] + if ok { + res.UsernamePrefix = ncloud.String(usernamePrefix.(string)) + } + groupsClaim, ok := oidcSpec["groups_claim"] + if ok { + res.GroupsClaim = ncloud.String(groupsClaim.(string)) + } + groupsPrefix, ok := oidcSpec["groups_prefix"] + if ok { + res.GroupsPrefix = ncloud.String(groupsPrefix.(string)) + } + requiredClaims, ok := 
oidcSpec["required_claim"] + if ok { + res.RequiredClaim = ncloud.String(requiredClaims.(string)) + } + } + + return res +} + +func flattenNKSClusterIPAclEntries(ipAcl *vnks.IpAclsRes) *schema.Set { + + ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{}) + + for _, entry := range ipAcl.Entries { + m := map[string]interface{}{ + "action": *entry.Action, + "address": *entry.Address, + } + if entry.Comment != nil { + m["comment"] = *entry.Comment + } + ipAclList.Add(m) + } + + return ipAclList + +} + +func expandNKSClusterIPAcl(acl interface{}) []*vnks.IpAclsEntriesDto { + if acl == nil { + return []*vnks.IpAclsEntriesDto{} + } + + set := acl.(*schema.Set) + res := make([]*vnks.IpAclsEntriesDto, 0) + for _, raw := range set.List() { + entry := raw.(map[string]interface{}) + + add := &vnks.IpAclsEntriesDto{ + Address: ncloud.String(entry["address"].(string)), + Action: ncloud.String(entry["action"].(string)), + } + if comment, exist := entry["comment"].(string); exist { + add.Comment = ncloud.String(comment) + } + res = append(res, add) + } + + return res +} + +func flattenNKSNodePoolTaints(taints []*vnks.NodePoolTaint) *schema.Set { + + res := schema.NewSet(schema.HashResource(resourceNcloudNKSNodePool().Schema["taint"].Elem.(*schema.Resource)), []interface{}{}) + + for _, taint := range taints { + m := map[string]interface{}{ + "key": *taint.Key, + "effect": *taint.Effect, + "value": *taint.Value, + } + res.Add(m) + } + + return res + +} + +func expandNKSNodePoolTaints(taints interface{}) []*vnks.NodePoolTaint { + if taints == nil { + return nil + } + + set := taints.(*schema.Set) + res := make([]*vnks.NodePoolTaint, 0) + for _, raw := range set.List() { + taint := raw.(map[string]interface{}) + + add := &vnks.NodePoolTaint{ + Key: ncloud.String(taint["key"].(string)), + Effect: ncloud.String(taint["effect"].(string)), + Value: ncloud.String(taint["value"].(string)), + } + + res = 
append(res, add) + } + + return res +} + +func flattenNKSNodePoolLabels(labels []*vnks.NodePoolLabel) *schema.Set { + + res := schema.NewSet(schema.HashResource(resourceNcloudNKSNodePool().Schema["label"].Elem.(*schema.Resource)), []interface{}{}) + + for _, label := range labels { + m := map[string]interface{}{ + "key": *label.Key, + "value": *label.Value, + } + res.Add(m) + } + + return res + +} + +func expandNKSNodePoolLabels(labels interface{}) []*vnks.NodePoolLabel { + if labels == nil { + return nil + } + + set := labels.(*schema.Set) + res := make([]*vnks.NodePoolLabel, 0) + for _, raw := range set.List() { + labels := raw.(map[string]interface{}) + + add := &vnks.NodePoolLabel{ + Key: ncloud.String(labels["key"].(string)), + Value: ncloud.String(labels["value"].(string)), + } + + res = append(res, add) + } + + return res +} + +func flattenNKSNodePoolAutoScale(ao *vnks.AutoscaleOption) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if ao == nil { + return res + } + m := map[string]interface{}{ + "enabled": ncloud.BoolValue(ao.Enabled), + "min": ncloud.Int32Value(ao.Min), + "max": ncloud.Int32Value(ao.Max), + } + res = append(res, m) + return res +} + +func expandNKSNodePoolAutoScale(as []interface{}) *vnks.AutoscalerUpdate { + if len(as) == 0 { + return nil + } + autoScale := as[0].(map[string]interface{}) + return &vnks.AutoscalerUpdate{ + Enabled: ncloud.Bool(autoScale["enabled"].(bool)), + Min: ncloud.Int32(int32(autoScale["min"].(int))), + Max: ncloud.Int32(int32(autoScale["max"].(int))), + } +} + +func flattenNKSWorkerNodes(wns []*vnks.WorkerNode) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if wns == nil { + return res + } + for _, wn := range wns { + m := map[string]interface{}{ + "name": ncloud.StringValue(wn.Name), + "instance_no": ncloud.Int32Value(wn.Id), + "spec": ncloud.StringValue(wn.ServerSpec), + "private_ip": ncloud.StringValue(wn.PrivateIp), + "public_ip": ncloud.StringValue(wn.PublicIp), 
+ "node_status": ncloud.StringValue(wn.K8sStatus), + "container_version": ncloud.StringValue(wn.DockerVersion), + "kernel_version": ncloud.StringValue(wn.KernelVersion), + } + res = append(res, m) + } + + return res +} diff --git a/ncloud/nks_list_test.go b/ncloud/nks_list_test.go new file mode 100644 index 000000000..d0cfad367 --- /dev/null +++ b/ncloud/nks_list_test.go @@ -0,0 +1,480 @@ +package ncloud + +import ( + "reflect" + "testing" + + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func TestFlattenInt32ListToStringList(t *testing.T) { + initialList := []*int32{ + ncloud.Int32(int32(1111)), + ncloud.Int32(int32(2222)), + ncloud.Int32(int32(3333)), + } + + stringList := flattenInt32ListToStringList(initialList) + expected := []*string{ + ncloud.String("1111"), + ncloud.String("2222"), + ncloud.String("3333")} + if !reflect.DeepEqual(stringList, expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + stringList, + expected) + } +} + +func TestFlattenNKSClusterLogInput(t *testing.T) { + logInput := &vnks.ClusterLogInput{Audit: ncloud.Bool(true)} + + result := flattenNKSClusterLogInput(logInput) + + if result == nil { + t.Fatal("result was nil") + } + + r := result[0] + if r["audit"].(bool) != true { + t.Fatalf("expected result enabled to be true, but was %v", r["enabled"]) + } +} + +func TestExpandNKSClusterLogInput(t *testing.T) { + log := []interface{}{ + map[string]interface{}{ + "audit": false, + }, + } + + result := expandNKSClusterLogInput(log, &vnks.AuditLogDto{}) + + if result == nil { + t.Fatal("result was nil") + } + + if ncloud.BoolValue(result.Audit) != false { + t.Fatalf("expected false , but got %v", ncloud.BoolValue(result.Audit)) + } +} + +func TestFlattenNKSClusterOIDCSpec(t *testing.T) { + oidcSpec := &vnks.OidcRes{ + Status: ncloud.Bool(true), + UsernameClaim: ncloud.String("email"), + 
UsernamePrefix: ncloud.String("username:"), + IssuerURL: ncloud.String("https://sso.ntruss.com/iss"), + ClientId: ncloud.String("testClient"), + GroupsPrefix: ncloud.String("groups:"), + GroupsClaim: ncloud.String("group"), + RequiredClaim: ncloud.String("iss=https://sso.ntruss.com/iss"), + } + + result := flattenNKSClusterOIDCSpec(oidcSpec) + + if len(result) == 0 { + t.Fatal("empty result") + } + + r := result[0] + + if r["username_claim"].(string) != "email" { + t.Fatalf("expected result username_claim to be 'email', but was %v", r["username_claim"]) + } + + if r["username_prefix"].(string) != "username:" { + t.Fatalf("expected result username_prefix to be 'username:', but was %v", r["username_prefix"]) + } + + if r["issuer_url"].(string) != "https://sso.ntruss.com/iss" { + t.Fatalf("expected result issuer_url to be 'https://sso.ntruss.com/iss', but was %v", r["issuer_url"]) + } + + if r["client_id"].(string) != "testClient" { + t.Fatalf("expected result client_id to be 'testClient', but was %v", r["client_id"]) + } + + if r["groups_claim"].(string) != "group" { + t.Fatalf("expected result groups_claim to be 'group', but was %v", r["groups_claim"]) + } + + if r["groups_prefix"].(string) != "groups:" { + t.Fatalf("expected result groups_prefix to be 'groups:', but was %v", r["groups_prefix"]) + } + + if r["required_claim"].(string) != "iss=https://sso.ntruss.com/iss" { + t.Fatalf("expected result groups_prefix to be 'iss=https://sso.ntruss.com/iss', but was %v", r["required_claim"]) + } +} + +func TestExpandNKSClusterOIDCSpec(t *testing.T) { + oidc := []interface{}{ + map[string]interface{}{ + "issuer_url": "https://sso.ntruss.com/iss", + "client_id": "testClient", + "username_claim": "email", + "username_prefix": "username:", + "groups_claim": "group", + "groups_prefix": "groups:", + "required_claim": "iss=https://sso.ntruss.com/iss", + }, + } + + result := expandNKSClusterOIDCSpec(oidc) + + if result == nil { + t.Fatal("result was nil") + } + + expected := 
&vnks.UpdateOidcDto{ + Status: ncloud.Bool(true), + IssuerURL: ncloud.String("https://sso.ntruss.com/iss"), + ClientId: ncloud.String("testClient"), + UsernameClaim: ncloud.String("email"), + UsernamePrefix: ncloud.String("username:"), + GroupsClaim: ncloud.String("group"), + GroupsPrefix: ncloud.String("groups:"), + RequiredClaim: ncloud.String("iss=https://sso.ntruss.com/iss"), + } + + if !reflect.DeepEqual(result, expected) { + t.Fatalf("expected %v , but got %v", expected, result) + } +} + +func TestFlattenNKSClusterIPAcl(t *testing.T) { + ipAcl := &vnks.IpAclsRes{ + DefaultAction: ncloud.String("deny"), + Entries: []*vnks.IpAclsEntriesRes{ + {Address: ncloud.String("10.0.1.0/24"), + Action: ncloud.String("allow"), + Comment: ncloud.String("master ip"), + }, + }, + } + + result := flattenNKSClusterIPAclEntries(ipAcl) + + if len(result.List()) == 0 { + t.Fatal("empty result") + } + + r := result.List()[0] + rr := r.(map[string]interface{}) + if rr["address"].(string) != "10.0.1.0/24" { + t.Fatalf("expected result address to be '10.0.1.0/24', but was %v", rr["address"]) + } + + if rr["action"].(string) != "allow" { + t.Fatalf("expected result action to be 'allow', but was %v", rr["action"]) + } + + if rr["comment"].(string) != "master ip" { + t.Fatalf("expected result comment to be 'master ip', but was %v", rr["comment"]) + } +} + +func TestExpandNKSClusterIPAcl(t *testing.T) { + ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{}) + + ipAclList.Add(map[string]interface{}{ + "action": "allow", + "address": "10.0.1.0/24", + "comment": "master ip", + }) + + result := expandNKSClusterIPAcl(ipAclList) + + if result == nil { + t.Fatal("result was nil") + } + + expected := []*vnks.IpAclsEntriesDto{ + { + Address: ncloud.String("10.0.1.0/24"), + Action: ncloud.String("allow"), + Comment: ncloud.String("master ip"), + }, + } + + if !reflect.DeepEqual(result, expected) { + t.Fatalf("expected 
%v , but got %v", expected, result) + } +} + +func TestFlattenNKSNodePoolTaints(t *testing.T) { + + taints := []*vnks.NodePoolTaint{ + { + Key: ncloud.String("foo"), + Value: ncloud.String("bar"), + Effect: ncloud.String("NoExecute"), + }, + { + Key: ncloud.String("bar"), + Value: ncloud.String(""), + Effect: ncloud.String("NoSchedule"), + }, + } + + result := flattenNKSNodePoolTaints(taints) + + if len(result.List()) == 0 { + t.Fatal("empty result") + } + + r := result.List()[0] + rr := r.(map[string]interface{}) + if rr["key"].(string) != "foo" { + t.Fatalf("expected result key to be 'foo', but was %v", rr["key"]) + } + + if rr["value"].(string) != "bar" { + t.Fatalf("expected result value to be 'bar', but was %v", rr["value"]) + } + + if rr["effect"].(string) != "NoExecute" { + t.Fatalf("expected result effect to be 'NoExecute', but was %v", rr["effect"]) + } + + r = result.List()[1] + rr = r.(map[string]interface{}) + if rr["key"].(string) != "bar" { + t.Fatalf("expected result key to be 'bar', but was %v", rr["key"]) + } + + if rr["value"].(string) != "" { + t.Fatalf("expected result value to be '', but was %v", rr["value"]) + } + + if rr["effect"].(string) != "NoSchedule" { + t.Fatalf("expected result effect to be 'NoSchedule', but was %v", rr["effect"]) + } + +} + +func TestExpandNKSNodePoolTaints(t *testing.T) { + taints := schema.NewSet(schema.HashResource(resourceNcloudNKSNodePool().Schema["taint"].Elem.(*schema.Resource)), []interface{}{}) + + taints.Add(map[string]interface{}{ + "key": "foo", + "value": "bar", + "effect": "NoExecute", + }) + taints.Add(map[string]interface{}{ + "key": "bar", + "value": "", + "effect": "NoSchedule", + }) + + result := expandNKSNodePoolTaints(taints) + + if result == nil { + t.Fatal("result was nil") + } + + expected := []*vnks.NodePoolTaint{ + { + Key: ncloud.String("foo"), + Value: ncloud.String("bar"), + Effect: ncloud.String("NoExecute"), + }, + { + Key: ncloud.String("bar"), + Value: ncloud.String(""), + Effect: 
ncloud.String("NoSchedule"), + }, + } + + if !reflect.DeepEqual(result, expected) { + t.Fatalf("expected %v , but got %v", expected, result) + } +} + +func TestFlattenNKSNodePoolLabels(t *testing.T) { + + labels := []*vnks.NodePoolLabel{ + { + Key: ncloud.String("foo"), + Value: ncloud.String("bar"), + }, + { + Key: ncloud.String("bar"), + Value: ncloud.String("foo"), + }, + } + + result := flattenNKSNodePoolLabels(labels) + + if len(result.List()) == 0 { + t.Fatal("empty result") + } + + r := result.List()[0] + rr := r.(map[string]interface{}) + if rr["key"].(string) != "foo" { + t.Fatalf("expected result key to be 'foo', but was %v", rr["key"]) + } + + if rr["value"].(string) != "bar" { + t.Fatalf("expected result value to be 'bar', but was %v", rr["value"]) + } + + r = result.List()[1] + rr = r.(map[string]interface{}) + if rr["key"].(string) != "bar" { + t.Fatalf("expected result key to be 'bar', but was %v", rr["key"]) + } + + if rr["value"].(string) != "foo" { + t.Fatalf("expected result value to be 'foo', but was %v", rr["value"]) + } + +} + +func TestExpandNKSNodePoolLabels(t *testing.T) { + labels := schema.NewSet(schema.HashResource(resourceNcloudNKSNodePool().Schema["label"].Elem.(*schema.Resource)), []interface{}{}) + + labels.Add(map[string]interface{}{ + "key": "foo", + "value": "bar", + }) + labels.Add(map[string]interface{}{ + "key": "bar", + "value": "foo", + }) + + result := expandNKSNodePoolLabels(labels) + + if result == nil { + t.Fatal("result was nil") + } + + expected := []*vnks.NodePoolLabel{ + { + Key: ncloud.String("foo"), + Value: ncloud.String("bar"), + }, + { + Key: ncloud.String("bar"), + Value: ncloud.String("foo"), + }, + } + + if !reflect.DeepEqual(result, expected) { + t.Fatalf("expected %v , but got %v", expected, result) + } +} + +func TestFlattenNKSNodePoolAutoscale(t *testing.T) { + expanded := &vnks.AutoscaleOption{ + Enabled: ncloud.Bool(true), + Max: ncloud.Int32(2), + Min: ncloud.Int32(2), + } + + result := 
flattenNKSNodePoolAutoScale(expanded) + + if result == nil { + t.Fatal("result was nil") + } + + r := result[0] + if r["enabled"].(bool) != true { + t.Fatalf("expected result enabled to be true, but was %v", r["enabled"]) + } + + if r["min"].(int32) != 2 { + t.Fatalf("expected result min to be 2, but was %d", r["min"]) + } + + if r["max"].(int32) != 2 { + t.Fatalf("expected result max to be 2, but was %d", r["max"]) + } +} + +func TestFlattenNKSWorkerNodes(t *testing.T) { + expanded := []*vnks.WorkerNode{ + { + Id: ncloud.Int32(1), + Name: ncloud.String("node1"), + ServerSpec: ncloud.String("[Standard] vCPU 2EA, Memory 8GB"), + PrivateIp: ncloud.String("10.0.1.4"), + PublicIp: ncloud.String(""), + K8sStatus: ncloud.String("Ready"), + DockerVersion: ncloud.String("containerd://1.3.7"), + KernelVersion: ncloud.String("5.4.0-65-generic"), + }, + } + + result := flattenNKSWorkerNodes(expanded) + + if result == nil { + t.Fatal("result was nil") + } + + r := result[0] + if r["instance_no"].(int32) != 1 { + t.Fatalf("expected result instance_no to be 1, but was %v", r["instance_no"]) + } + + if r["name"].(string) != "node1" { + t.Fatalf("expected result name to be node1, but was %s", r["name"]) + } + + if r["spec"].(string) != "[Standard] vCPU 2EA, Memory 8GB" { + t.Fatalf("expected result spec to be [Standard] vCPU 2EA, Memory 8GB, but was %s", r["spec"]) + } + + if r["private_ip"].(string) != "10.0.1.4" { + t.Fatalf("expected result private_ip to be 10.0.1.4, but was %s", r["private_ip"]) + } + + if r["public_ip"].(string) != "" { + t.Fatalf("expected result public_ip to be emtpy, but was %s", r["public_ip"]) + } + + if r["node_status"].(string) != "Ready" { + t.Fatalf("expected result node_status to be Ready, but was %s", r["node_status"]) + } + + if r["container_version"].(string) != "containerd://1.3.7" { + t.Fatalf("expected result container_version to be containerd://1.3.7, but was %s", r["container_version"]) + } + + if r["kernel_version"].(string) != 
"5.4.0-65-generic" { + t.Fatalf("expected result kernel_version to be 5.4.0-65-generic, but was %s", r["kernel_version"]) + } +} + +func TestExpandNKSNodePoolAutoScale(t *testing.T) { + autoscaleList := []interface{}{ + map[string]interface{}{ + "enabled": true, + "min": 2, + "max": 2, + }, + } + + result := expandNKSNodePoolAutoScale(autoscaleList) + + if result == nil { + t.Fatal("result was nil") + } + + if ncloud.BoolValue(result.Enabled) != true { + t.Fatalf("expected result true, but got %v", ncloud.BoolValue(result.Enabled)) + } + + if ncloud.Int32Value(result.Min) != int32(2) { + t.Fatalf("expected result 2, but got %d", ncloud.Int32Value(result.Min)) + } + + if ncloud.Int32Value(result.Max) != int32(2) { + t.Fatalf("expected result 2, but got %d", ncloud.Int32Value(result.Max)) + } +} diff --git a/ncloud/resource_ncloud_nks_cluster.go b/ncloud/resource_ncloud_nks_cluster.go index c842eb041..5a3ace891 100644 --- a/ncloud/resource_ncloud_nks_cluster.go +++ b/ncloud/resource_ncloud_nks_cluster.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "log" - "os" "strconv" "strings" "time" @@ -46,9 +45,24 @@ func resourceNcloudNKSCluster() *schema.Resource { }, CustomizeDiff: customdiff.All( customdiff.ForceNewIfChange("subnet_no_list", func(ctx context.Context, old, new, meta any) bool { - _, removed := getSubnetDiff(old, new) + _, removed, _ := getSubnetDiff(old, new) return len(removed) > 0 }), + customdiff.ValidateValue("ip_acl_default_action", func(ctx context.Context, value, meta interface{}) error { + config := meta.(*ProviderConfig) + if value != "" && checkFinSite(config) { + return fmt.Errorf("ip_acl_default_action is not supported on fin site") + } + return nil + }), + customdiff.ValidateValue("ip_acl", func(ctx context.Context, value, meta interface{}) error { + set := value.(*schema.Set) + config := meta.(*ProviderConfig) + if set.Len() > 0 && 
checkFinSite(config) { + return fmt.Errorf("ip_acl is not supported on fin site") + } + return nil + }), ), Schema: map[string]*schema.Schema{ "uuid": { @@ -66,6 +80,12 @@ func resourceNcloudNKSCluster() *schema.Resource { Required: true, ForceNew: true, }, + "hypervisor_code": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, "endpoint": { Type: schema.TypeString, Computed: true, @@ -106,12 +126,10 @@ func resourceNcloudNKSCluster() *schema.Resource { "lb_private_subnet_no": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "lb_public_subnet_no": { Type: schema.TypeString, Optional: true, - ForceNew: true, }, "kube_network_plugin": { Type: schema.TypeString, @@ -177,7 +195,7 @@ func resourceNcloudNKSCluster() *schema.Resource { "ip_acl_default_action": { Type: schema.TypeString, Optional: true, - Default: "allow", + Computed: true, ValidateDiagFunc: ToDiagFunc(validation.StringInSlice([]string{"allow", "deny"}, false)), }, "ip_acl": { @@ -216,6 +234,7 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, //Required Name: StringPtrOrNil(d.GetOk("name")), ClusterType: StringPtrOrNil(d.GetOk("cluster_type")), + HypervisorCode: StringPtrOrNil(d.GetOk("hypervisor_code")), LoginKeyName: StringPtrOrNil(d.GetOk("login_key_name")), K8sVersion: StringPtrOrNil(d.GetOk("k8s_version")), ZoneCode: StringPtrOrNil(d.GetOk("zone")), @@ -242,12 +261,14 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, oidcReq = expandNKSClusterOIDCSpec(oidc.([]interface{})) } - ipAclReq := &vnks.IpAclsDto{ - DefaultAction: StringPtrOrNil(d.GetOk("ip_acl_default_action")), - Entries: []*vnks.IpAclsEntriesDto{}, - } - if ipAcl, ok := d.GetOk("ip_acl"); ok { - ipAclReq.Entries = expandNKSClusterIPAcl(ipAcl) + var ipAclReq *vnks.IpAclsDto + ipAclDefaultAction, ipAclDefaultActionExist := d.GetOk("ip_acl_default_action") + ipAcl, ipAclExist := d.GetOk("ip_acl") + if ipAclDefaultActionExist 
|| ipAclExist { + ipAclReq = &vnks.IpAclsDto{ + DefaultAction: StringPtrOrNil(ipAclDefaultAction, ipAclDefaultActionExist), + Entries: expandNKSClusterIPAcl(ipAcl), + } } logCommonRequest("resourceNcloudNKSClusterCreate", reqParams) @@ -264,20 +285,7 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, } d.SetId(uuid) - if (ncloud.StringValue(ipAclReq.DefaultAction) != "allow" || len(ipAclReq.Entries) > 0) && !checkFinSite(config) { - _, err = config.Client.vnks.V2Api.ClustersUuidIpAclPatch(ctx, ipAclReq, resp.Uuid) - if err != nil { - logErrorResponse("resourceNcloudNKSClusterCreate:ipAcl", err, ipAclReq) - return diag.FromErr(err) - } - } - if oidcReq != nil { - - if err = waitForNKSClusterActive(ctx, d, config, uuid); err != nil { - return diag.FromErr(err) - } - _, err = config.Client.vnks.V2Api.ClustersUuidOidcPatch(ctx, oidcReq, resp.Uuid) if err != nil { logErrorResponse("resourceNcloudNKSClusterCreate:oidc", err, oidcReq) @@ -290,6 +298,14 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, } } + if ipAclReq != nil && !checkFinSite(config) { + _, err = config.Client.vnks.V2Api.ClustersUuidIpAclPatch(ctx, ipAclReq, resp.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterCreate:ipAcl", err, ipAclReq) + return diag.FromErr(err) + } + } + return resourceNcloudNKSClusterRead(ctx, d, meta) } @@ -323,6 +339,7 @@ func resourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, m d.Set("uuid", cluster.Uuid) d.Set("name", cluster.Name) d.Set("cluster_type", cluster.ClusterType) + d.Set("hypervisor_code", cluster.HypervisorCode) d.Set("endpoint", cluster.Endpoint) d.Set("login_key_name", cluster.LoginKeyName) d.Set("k8s_version", cluster.K8sVersion) @@ -378,12 +395,6 @@ func resourceNcloudNKSClusterUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChanges("k8s_version") { - - if err = waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { - return 
diag.FromErr(err) - } - - // Cluster UPGRADE newVersion := StringPtrOrNil(d.GetOk("k8s_version")) _, err := config.Client.vnks.V2Api.ClustersUuidUpgradePatch(ctx, cluster.Uuid, newVersion, map[string]interface{}{}) if err != nil { @@ -398,11 +409,6 @@ func resourceNcloudNKSClusterUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChanges("oidc") { - - if err = waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { - return diag.FromErr(err) - } - var oidcSpec *vnks.UpdateOidcDto oidc, _ := d.GetOk("oidc") oidcSpec = expandNKSClusterOIDCSpec(oidc.([]interface{})) @@ -453,10 +459,32 @@ func resourceNcloudNKSClusterUpdate(ctx context.Context, d *schema.ResourceData, } + if d.HasChanges("lb_private_subnet_no") { + + lbPrivateSubnetNo, _ := strconv.Atoi(d.Get("lb_private_subnet_no").(string)) + _, err = config.Client.vnks.V2Api.ClustersUuidLbSubnetPatch(ctx, cluster.Uuid, ncloud.Int32(int32(lbPrivateSubnetNo)), map[string]interface{}{"igwYn": ncloud.String("N")}) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterLbPrivateSubnetPatch", err, lbPrivateSubnetNo) + return diag.FromErr(err) + } + + } + + if d.HasChanges("lb_public_subnet_no") { + + lbPrivateSubnetNo, _ := strconv.Atoi(d.Get("lb_public_subnet_no").(string)) + _, err = config.Client.vnks.V2Api.ClustersUuidLbSubnetPatch(ctx, cluster.Uuid, ncloud.Int32(int32(lbPrivateSubnetNo)), map[string]interface{}{"igwYn": ncloud.String("Y")}) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterLbPublicSubnetPatch", err, lbPrivateSubnetNo) + return diag.FromErr(err) + } + + } + if d.HasChanges("subnet_no_list") { oldList, newList := d.GetChange("subnet_no_list") - added, _ := getSubnetDiff(oldList, newList) + added, _, _ := getSubnetDiff(oldList, newList) subnets := &vnks.AddSubnetDto{ Subnets: []*vnks.SubnetDto{}, @@ -601,18 +629,21 @@ func getNKSClusters(ctx context.Context, config *ProviderConfig) ([]*vnks.Cluste return resp.Clusters, nil } -func getSubnetDiff(oldList 
interface{}, newList interface{}) (added []*int32, removed []*int32) { +func getSubnetDiff(oldList interface{}, newList interface{}) (added []*int32, removed []*int32, autoSelect bool) { oldMap := make(map[string]int) newMap := make(map[string]int) + autoSelect = true for _, v := range expandStringInterfaceList(oldList.(([]interface{}))) { oldMap[*v] += 1 + autoSelect = false } + for _, v := range expandStringInterfaceList(newList.(([]interface{}))) { newMap[*v] += 1 } - for subnet, _ := range oldMap { + for subnet := range oldMap { if _, exist := newMap[subnet]; !exist { intV, err := strconv.Atoi(subnet) if err == nil { @@ -621,7 +652,7 @@ func getSubnetDiff(oldList interface{}, newList interface{}) (added []*int32, re } } - for subnet, _ := range newMap { + for subnet := range newMap { if _, exist := oldMap[subnet]; !exist { intV, err := strconv.Atoi(subnet) if err == nil { @@ -632,10 +663,6 @@ func getSubnetDiff(oldList interface{}, newList interface{}) (added []*int32, re return } -func checkFinSite(config *ProviderConfig) (result bool) { - ncloudApiGw := os.Getenv("NCLOUD_API_GW") - if config.Site == "fin" || strings.HasSuffix(ncloudApiGw, "apigw.fin-ntruss.com") { - result = true - } - return +func checkFinSite(config *ProviderConfig) bool { + return strings.HasPrefix(config.RegionCode, "F") } diff --git a/ncloud/resource_ncloud_nks_cluster_test.go b/ncloud/resource_ncloud_nks_cluster_test.go index ed545c744..56e12a2f4 100644 --- a/ncloud/resource_ncloud_nks_cluster_test.go +++ b/ncloud/resource_ncloud_nks_cluster_test.go @@ -1,15 +1,18 @@ package ncloud import ( + "bytes" "context" "fmt" - "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" - "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" "os" "regexp" "strings" "testing" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vpc" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -18,13 +21,41 @@ import ( // Create LoginKey Before NKS Test const TF_TEST_NKS_LOGIN_KEY = "tf-test-nks-login-key" -func TestAccResourceNcloudNKSCluster_basic(t *testing.T) { - var cluster vnks.Cluster +type NKSTestInfo struct { + Vpc *vpc.Vpc + DefaultAcl *vpc.NetworkAcl + PrivateSubnetList []*vpc.Subnet + PublicSubnetList []*vpc.Subnet + PrivateLbSubnetList []*vpc.Subnet + PublicLbSubnetList []*vpc.Subnet + Region string + ClusterType string + ProductType string + K8sVersion string + UpgradeK8sVersion string + HypervisorCode string + IsFin bool + IsCaaS bool + needPublicLb bool +} + +func validateAcctestEnvironment(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("Accetance Test skipped unless env 'TF_ACC' is set") + } +} + +func TestAccResourceNcloudNKSCluster_basic_XEN(t *testing.T) { + validateAcctestEnvironment(t) + name := getTestClusterName() resourceName := "ncloud_nks_cluster.cluster" - region, clusterType, _, k8sVersion := getRegionAndNKSType() + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -32,24 +63,8 @@ func TestAccResourceNcloudNKSCluster_basic(t *testing.T) { CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "name", name), - resource.TestCheckResourceAttr(resourceName, "cluster_type", clusterType), - resource.TestMatchResourceAttr(resourceName, "k8s_version", regexp.MustCompile(k8sVersion)), - resource.TestCheckResourceAttr(resourceName, "login_key_name", 
TF_TEST_NKS_LOGIN_KEY), - resource.TestCheckResourceAttr(resourceName, "zone", fmt.Sprintf("%s-1", region)), - resource.TestMatchResourceAttr(resourceName, "vpc_no", regexp.MustCompile(`^\d+$`)), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), - ), + Config: testAccResourceNcloudNKSClusterDefaultConfig(name, TF_TEST_NKS_LOGIN_KEY, true, nksInfo), + Check: testAccResourceNcloudNKSClusterDefaultConfigCheck(resourceName, name, nksInfo), }, { ResourceName: resourceName, @@ -60,177 +75,16 @@ func TestAccResourceNcloudNKSCluster_basic(t *testing.T) { }) } -func TestAccResourceNcloudNKSCluster_public_network(t *testing.T) { - var cluster vnks.Cluster - name := getTestClusterName() - resourceName := "ncloud_nks_cluster.cluster" - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSClusterPublicNetworkConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "name", name), - 
resource.TestCheckResourceAttr(resourceName, "cluster_type", clusterType), - resource.TestMatchResourceAttr(resourceName, "k8s_version", regexp.MustCompile(k8sVersion)), - resource.TestCheckResourceAttr(resourceName, "login_key_name", TF_TEST_NKS_LOGIN_KEY), - resource.TestCheckResourceAttr(resourceName, "public_network", "true"), - resource.TestCheckResourceAttr(resourceName, "zone", fmt.Sprintf("%s-1", region)), - resource.TestMatchResourceAttr(resourceName, "vpc_no", regexp.MustCompile(`^\d+$`)), - ), - }, - }, - }) -} +func TestAccResourceNcloudNKSCluster_public_network_XEN(t *testing.T) { + validateAcctestEnvironment(t) -func TestAccResourceNcloudNKSCluster_InvalidSubnet(t *testing.T) { name := getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSCluster_InvalidSubnetConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region), - ExpectError: regexp.MustCompile(getInvalidSubnetExpectError()), - }, - }, - }) -} - -func TestAccResourceNcloudNKSCluster_Update(t *testing.T) { - var cluster vnks.Cluster - name := "m3-" + getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - resourceName := "ncloud_nks_cluster.cluster" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), - ), - Destroy: false, - }, - { - Config: 
testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSCluster_NoOIDCSpec(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSCluster_UpdateOnce(t *testing.T) { - var cluster vnks.Cluster - name := "m3-" + getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - resourceName := "ncloud_nks_cluster.cluster" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - ), - Destroy: false, - }, - { - Config: 
testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), - resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), - resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSCluster_VersionUpgrade(t *testing.T) { - var cluster vnks.Cluster - name := "m3-" + getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() resourceName := "ncloud_nks_cluster.cluster" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSCluster_OIDCSpec(t *testing.T) { - var cluster vnks.Cluster - name := getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - resourceName := "ncloud_nks_cluster.cluster" + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -238,81 +92,23 @@ func TestAccResourceNcloudNKSCluster_OIDCSpec(t 
*testing.T) { CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSCluster_NoOIDCSpec(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), - 
resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), - resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), - ), + Config: testAccResourceNcloudNKSClusterPublicNetworkConfig(name, TF_TEST_NKS_LOGIN_KEY, nksInfo), + Check: testAccResourceNcloudNKSClusterPublicNetworkConfigCheck(name, resourceName, nksInfo), }, }, }) } -func TestAccResourceNcloudNKSCluster_AuditLog(t *testing.T) { - var cluster vnks.Cluster - name := getTestClusterName() +func TestAccResourceNcloudNKSCluster_Update_XEN(t *testing.T) { + validateAcctestEnvironment(t) - region, clusterType, _, k8sVersion := getRegionAndNKSType() + name := fmt.Sprintf("m3-%s", getTestClusterName()) resourceName := "ncloud_nks_cluster.cluster" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSCluster_AddSubnet(t *testing.T) { - var cluster vnks.Cluster - name := getTestClusterName() - - region, clusterType, _, k8sVersion := getRegionAndNKSType() - resourceName := "ncloud_nks_cluster.cluster" + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, @@ -320,325 +116,207 @@ func TestAccResourceNcloudNKSCluster_AddSubnet(t *testing.T) { CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - ), + Config: testAccResourceNcloudNKSClusterDefaultConfig(name, TF_TEST_NKS_LOGIN_KEY, true, nksInfo), + Check: testAccResourceNcloudNKSClusterDefaultConfigCheck(resourceName, name, nksInfo), Destroy: false, }, { - Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), - ), + Config: testAccResourceNcloudNKSClusterUpdateConfig(name, TF_TEST_NKS_LOGIN_KEY, false, nksInfo), + Check: testAccResourceNcloudNKSClusterUpdateConfigCheck(resourceName, nksInfo), Destroy: false, }, }, }) } -func testAccResourceNcloudNKSClusterConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = 
"%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - +func testAccResourceNcloudNKSClusterDefaultConfig(name string, loginKeyName string, auditLog bool, nksInfo *NKSTestInfo) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" k8s_version = "%[3]s" login_key_name = "%[4]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" log { - audit = %[6]t + audit = %[10]t } oidc { - issuer_url = "https://keycloak.ncp.gimmetm.net/realms/nks" + issuer_url = "https://keycloak.url/realms/nks" client_id = "nks-client" username_claim = "preferred_username" username_prefix = "oidc:" groups_claim = "groups" groups_prefix = "oidc:" - required_claim = "iss=https://keycloak.ncp.gimmetm.net/realms/nks" + required_claim = "iss=https://keycloak.url/realms/nks" } -} -`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) -} - -func testAccResourceNcloudNKSClusterPublicNetworkConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region, auditLog)) + + if !nksInfo.IsFin { + b.WriteString(` + ip_acl_default_action = "deny" + ip_acl { + action = "allow" + address = "223.130.195.0/24" + comment = "allow ip" + } + +`) + } -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = 
"10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PUBLIC" - usage_type = "GEN" -} + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PUBLIC" - usage_type = "GEN" + b.WriteString(` } - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" +`) + return b.String() } +func testAccResourceNcloudNKSClusterUpdateConfig(name string, loginKeyName string, auditLog bool, nksInfo *NKSTestInfo) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" k8s_version = "%[3]s" login_key_name = "%[4]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" kube_network_plugin = "cilium" - public_network = "true" - subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, - ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" -} -`, name, clusterType, k8sVersion, loginKeyName, region) -} - -func testAccResourceNcloudNKSCluster_InvalidSubnetConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-2" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource 
"ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-2" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-2" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - -resource "ncloud_nks_cluster" "cluster" { - name = "%[1]s" - cluster_type = "%[2]s" - k8s_version = "%[3]s" - login_key_name = "%[4]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id - kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, + %[7]s, + %[8]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" + vpc_no = %[9]s + zone = "%[10]s-1" log { - audit = true + audit = %[11]t } -} -`, name, clusterType, k8sVersion, loginKeyName, region) -} + oidc { + issuer_url = "https://keycloak.url/realms/update" + client_id = "update-client" + } +`, name, nksInfo.ClusterType, nksInfo.UpgradeK8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.PrivateSubnetList[1].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region, auditLog)) + if !nksInfo.IsFin { + b.WriteString(` + ip_acl_default_action = "allow" + +`) + } -func testAccResourceNcloudNKSCluster_NoOIDCSpec(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = 
ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" + b.WriteString(` } - -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" +`) + return b.String() } +func testAccResourceNcloudNKSClusterPublicNetworkConfig(name string, loginKeyName string, nksInfo *NKSTestInfo) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" k8s_version = "%[3]s" login_key_name = "%[4]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + hypervisor_code = "%[5]s" + lb_private_subnet_no = %[6]s kube_network_plugin = "cilium" + public_network = "true" subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" - log { - audit = "%[6]t" - } -} -`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) -} - -func testAccResourceNcloudNKSCluster_AddSubnet(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet1" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-1" - subnet = "10.2.1.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} + vpc_no = %[8]s + zone = "%[9]s-1" +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, nksInfo.HypervisorCode, 
*nksInfo.PrivateLbSubnetList[0].SubnetNo, *nksInfo.PublicSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } -resource "ncloud_subnet" "subnet2" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-2" - subnet = "10.2.2.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" + b.WriteString(` } - -resource "ncloud_subnet" "subnet3" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-3" - subnet = "10.2.4.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" +`) + return b.String() } -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[5]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" +func testAccResourceNcloudNKSClusterDefaultConfigCheck(resourceName string, name string, nksInfo *NKSTestInfo) (check resource.TestCheckFunc) { + var cluster vnks.Cluster + check = resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "cluster_type", nksInfo.ClusterType), + resource.TestMatchResourceAttr(resourceName, "k8s_version", regexp.MustCompile(nksInfo.K8sVersion)), + resource.TestCheckResourceAttr(resourceName, "login_key_name", TF_TEST_NKS_LOGIN_KEY), + resource.TestCheckResourceAttr(resourceName, "zone", fmt.Sprintf("%s-1", nksInfo.Region)), + resource.TestMatchResourceAttr(resourceName, "vpc_no", regexp.MustCompile(`^\d+$`)), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.url/realms/nks"), + 
resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.url/realms/nks"), + ) + if !nksInfo.IsFin { + + check = resource.ComposeTestCheckFunc( + check, + resource.TestCheckResourceAttr(resourceName, "ip_acl_default_action", "deny"), + resource.TestCheckResourceAttr(resourceName, "ip_acl.0.action", "allow"), + resource.TestCheckResourceAttr(resourceName, "ip_acl.0.address", "223.130.195.0/24"), + resource.TestCheckResourceAttr(resourceName, "ip_acl.0.comment", "allow ip"), + ) + } + return } -resource "ncloud_nks_cluster" "cluster" { - name = "%[1]s" - cluster_type = "%[2]s" - k8s_version = "%[3]s" - login_key_name = "%[4]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id - kube_network_plugin = "cilium" - subnet_no_list = [ - ncloud_subnet.subnet1.id, - ncloud_subnet.subnet2.id, - ncloud_subnet.subnet3.id, - ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[5]s-1" - log { - audit = "%[6]t" - } -} -`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) +func testAccResourceNcloudNKSClusterUpdateConfigCheck(resourceName string, nksInfo *NKSTestInfo) (check resource.TestCheckFunc) { + var cluster vnks.Cluster + return resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestMatchResourceAttr(resourceName, "k8s_version", regexp.MustCompile(nksInfo.UpgradeK8sVersion)), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.url/realms/update"), + 
resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "update-client"), + resource.TestCheckResourceAttr(resourceName, "ip_acl_default_action", "allow"), + resource.TestCheckResourceAttr(resourceName, "ip_acl.#", "0"), + ) +} + +func testAccResourceNcloudNKSClusterPublicNetworkConfigCheck(name string, resourceName string, nksInfo *NKSTestInfo) (check resource.TestCheckFunc) { + var cluster vnks.Cluster + return resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "cluster_type", nksInfo.ClusterType), + resource.TestMatchResourceAttr(resourceName, "k8s_version", regexp.MustCompile(nksInfo.K8sVersion)), + resource.TestCheckResourceAttr(resourceName, "login_key_name", TF_TEST_NKS_LOGIN_KEY), + resource.TestCheckResourceAttr(resourceName, "public_network", "true"), + resource.TestCheckResourceAttr(resourceName, "zone", fmt.Sprintf("%s-1", nksInfo.Region)), + resource.TestMatchResourceAttr(resourceName, "vpc_no", regexp.MustCompile(`^\d+$`)), + resource.TestCheckResourceAttr(resourceName, "ip_acl_default_action", "allow"), + resource.TestCheckResourceAttr(resourceName, "ip_acl.#", "0"), + ) } func testAccCheckNKSClusterExists(n string, cluster *vnks.Cluster) resource.TestCheckFunc { @@ -652,7 +330,7 @@ func testAccCheckNKSClusterExists(n string, cluster *vnks.Cluster) resource.Test return fmt.Errorf("No cluster uuid is set") } - config := testAccProvider.Meta().(*ProviderConfig) + config := getTestProvider(true).Meta().(*ProviderConfig) resp, err := getNKSCluster(context.Background(), config, rs.Primary.ID) if err != nil { return err @@ -664,27 +342,194 @@ func testAccCheckNKSClusterExists(n string, cluster *vnks.Cluster) resource.Test } } -func testAccCheckNKSClusterDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*ProviderConfig) +func getNKSTestInfo(hypervisor string) (*NKSTestInfo, error) { - 
for _, rs := range s.RootModule().Resources { - if rs.Type != "ncloud_nks_cluster" { - continue + nksInfo := &NKSTestInfo{ + Region: os.Getenv("NCLOUD_REGION"), + K8sVersion: "1.25.15-nks.1", + UpgradeK8sVersion: "1.26.10-nks.1", + HypervisorCode: hypervisor, + } + zoneCode := ncloud.String(fmt.Sprintf("%s-1", nksInfo.Region)) + nksInfo.IsFin = strings.HasPrefix(nksInfo.Region, "F") + nksInfo.IsCaaS = nksInfo.Region[1:2] == "CS" + nksInfo.needPublicLb = true + + if nksInfo.IsFin { + nksInfo.needPublicLb = false + } + if hypervisor == "KVM" { + switch nksInfo.Region { + case "FKR": + nksInfo.ClusterType = "SVR.VNKS.STAND.C002.M008.G003" + case "PCS02": + nksInfo.ClusterType = "SVR.VNKS.STAND.C008.M032.G003" + default: + nksInfo.ClusterType = "SVR.VNKS.STAND.C002.M008.G003" + } + } else { + switch nksInfo.Region { + case "FKR": + nksInfo.ClusterType = "SVR.VNKS.STAND.C002.M008.NET.HDD.B050.G001" + default: + nksInfo.ClusterType = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002" } + } + + vpcName := ncloud.String("tf-test-vpc") + apiKeys := &ncloud.APIKey{ + AccessKey: os.Getenv("NCLOUD_ACCESS_KEY"), + SecretKey: os.Getenv("NCLOUD_SECRET_KEY"), + } + + vpcClient := vpc.NewAPIClient(vpc.NewConfiguration(apiKeys)) - clusters, err := getNKSClusters(context.Background(), config) + reqParams := &vpc.GetVpcListRequest{ + RegionCode: &nksInfo.Region, + VpcName: vpcName, + } + vpcResp, err := vpcClient.V2Api.GetVpcList(reqParams) + if err != nil { + return nil, err + } + + if len(vpcResp.VpcList) == 0 { + createVpcReq := &vpc.CreateVpcRequest{ + RegionCode: &nksInfo.Region, + VpcName: vpcName, + Ipv4CidrBlock: ncloud.String("10.0.0.0/16"), + } + createVpcResp, err := vpcClient.V2Api.CreateVpc(createVpcReq) if err != nil { - return err + return nil, err + } + nksInfo.Vpc = createVpcResp.VpcList[0] + + } else { + nksInfo.Vpc = vpcResp.VpcList[0] + } + + aclReq := &vpc.GetNetworkAclListRequest{ + RegionCode: &nksInfo.Region, + VpcNo: nksInfo.Vpc.VpcNo, + } + aclResp, err := 
vpcClient.V2Api.GetNetworkAclList(aclReq) + if err != nil { + return nil, err + } + for _, acl := range aclResp.NetworkAclList { + if *acl.IsDefault { + nksInfo.DefaultAcl = acl } + } + + subnetReqParams := &vpc.GetSubnetListRequest{ + VpcNo: nksInfo.Vpc.VpcNo, + RegionCode: &nksInfo.Region, + } + + subnetResp, err := vpcClient.V2Api.GetSubnetList(subnetReqParams) + if err != nil { + return nil, err + } + + for _, subnet := range subnetResp.SubnetList { + if *subnet.UsageType.Code == "GEN" && *subnet.SubnetType.Code == "PRIVATE" { + nksInfo.PrivateSubnetList = append(nksInfo.PrivateSubnetList, subnet) + } else if *subnet.UsageType.Code == "GEN" && *subnet.SubnetType.Code == "PUBLIC" { + nksInfo.PublicSubnetList = append(nksInfo.PublicSubnetList, subnet) + } else if *subnet.UsageType.Code == "LOADB" && *subnet.SubnetType.Code == "PRIVATE" { + nksInfo.PrivateLbSubnetList = append(nksInfo.PrivateLbSubnetList, subnet) + } else if *subnet.UsageType.Code == "LOADB" && *subnet.SubnetType.Code == "PUBLIC" { + nksInfo.PublicLbSubnetList = append(nksInfo.PublicLbSubnetList, subnet) + } + } - for _, cluster := range clusters { - if ncloud.StringValue(cluster.Uuid) == rs.Primary.ID { - return fmt.Errorf("Cluster still exists") + if len(nksInfo.PrivateSubnetList) == 0 { + for i := 1; i <= 2; i++ { + + createSubnetReq := &vpc.CreateSubnetRequest{ + VpcNo: nksInfo.Vpc.VpcNo, + RegionCode: &nksInfo.Region, + ZoneCode: zoneCode, + SubnetTypeCode: ncloud.String("PRIVATE"), + UsageTypeCode: ncloud.String("GEN"), + NetworkAclNo: nksInfo.DefaultAcl.NetworkAclNo, + Subnet: ncloud.String(fmt.Sprintf("10.0.%d.0/24", i)), + SubnetName: ncloud.String(fmt.Sprintf("tf-subnet-priv-%d", i)), } + + subnetResp, err := vpcClient.V2Api.CreateSubnet(createSubnetReq) + if err != nil { + return nil, err + } + + nksInfo.PrivateSubnetList = append(nksInfo.PrivateSubnetList, subnetResp.SubnetList[0]) + } + } + + if len(nksInfo.PublicSubnetList) == 0 && !nksInfo.IsCaaS { + createSubnetReq := 
&vpc.CreateSubnetRequest{ + VpcNo: nksInfo.Vpc.VpcNo, + RegionCode: &nksInfo.Region, + ZoneCode: zoneCode, + SubnetTypeCode: ncloud.String("PUBLIC"), + UsageTypeCode: ncloud.String("GEN"), + NetworkAclNo: nksInfo.DefaultAcl.NetworkAclNo, + Subnet: ncloud.String("10.0.10.0/24"), + SubnetName: ncloud.String("tf-subnet-pub"), + } + + subnetResp, err := vpcClient.V2Api.CreateSubnet(createSubnetReq) + if err != nil { + return nil, err } + + nksInfo.PublicSubnetList = append(nksInfo.PublicSubnetList, subnetResp.SubnetList[0]) + } + + if len(nksInfo.PrivateLbSubnetList) == 0 { + createSubnetReq := &vpc.CreateSubnetRequest{ + VpcNo: nksInfo.Vpc.VpcNo, + RegionCode: &nksInfo.Region, + ZoneCode: zoneCode, + SubnetTypeCode: ncloud.String("PRIVATE"), + UsageTypeCode: ncloud.String("LOADB"), + NetworkAclNo: nksInfo.DefaultAcl.NetworkAclNo, + Subnet: ncloud.String("10.0.100.0/24"), + SubnetName: ncloud.String("tf-subnet-lb-priv"), + } + + subnetResp, err := vpcClient.V2Api.CreateSubnet(createSubnetReq) + if err != nil { + return nil, err + } + + nksInfo.PrivateLbSubnetList = append(nksInfo.PrivateLbSubnetList, subnetResp.SubnetList[0]) } - return nil + if len(nksInfo.PublicLbSubnetList) == 0 && nksInfo.needPublicLb { + createSubnetReq := &vpc.CreateSubnetRequest{ + VpcNo: nksInfo.Vpc.VpcNo, + RegionCode: &nksInfo.Region, + ZoneCode: zoneCode, + SubnetTypeCode: ncloud.String("PUBLIC"), + UsageTypeCode: ncloud.String("LOADB"), + NetworkAclNo: nksInfo.DefaultAcl.NetworkAclNo, + Subnet: ncloud.String("10.0.101.0/24"), + SubnetName: ncloud.String("tf-subnet-lb-pub"), + } + + subnetResp, err := vpcClient.V2Api.CreateSubnet(createSubnetReq) + if err != nil { + return nil, err + } + + nksInfo.PublicLbSubnetList = append(nksInfo.PublicLbSubnetList, subnetResp.SubnetList[0]) + } + + return nksInfo, nil + } func getTestClusterName() string { @@ -692,24 +537,3 @@ func getTestClusterName() string { testClusterName := fmt.Sprintf("tf-%d-cluster", rInt) return testClusterName } - -func 
getRegionAndNKSType() (region string, clusterType string, productType string, k8sVersion string) { - region = os.Getenv("NCLOUD_REGION") - if region == "FKR" { - clusterType = "SVR.VNKS.STAND.C002.M008.NET.HDD.B050.G001" - productType = "SVR.VSVR.STAND.C002.M004.NET.SSD.B050.G001" - } else { - clusterType = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002" - productType = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002" - } - k8sVersion = "1.24.10-nks.1" - return -} - -func getInvalidSubnetExpectError() string { - apigw := os.Getenv("NCLOUD_API_GW") - if strings.Contains(apigw, "gov-ntruss.com") { - return "Not found zone" - } - return "Subnet is undefined" -} diff --git a/ncloud/resource_ncloud_nks_node_pool.go b/ncloud/resource_ncloud_nks_node_pool.go index c1fef6ad6..34cc50b93 100644 --- a/ncloud/resource_ncloud_nks_node_pool.go +++ b/ncloud/resource_ncloud_nks_node_pool.go @@ -3,6 +3,12 @@ package ncloud import ( "context" "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -10,10 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "log" - "strconv" - "strings" - "time" ) func init() { @@ -27,6 +29,7 @@ const ( NKSNodePoolStatusRotateNodeScaleOut = "ROTATE_NODE_SCALE_OUT" NKSNodePoolStatusRotateNodeScaleDown = "ROTATE_NODE_SCALE_DOWN" NKSNodePoolStatusUpgrade = "UPGRADE" + NKSNodePoolStatusUpdate = "UPDATING" NKSNodePoolIDSeparator = ":" ) @@ -58,7 +61,13 @@ func resourceNcloudNKSNodePool() *schema.Resource { } return nil }), + customdiff.ForceNewIfChange("subnet_no_list", func(ctx context.Context, old, new, meta any) bool { + // force new if removed subnet or subnet auto select(emtpy sunbnet_no_list) + _, removed, autoSelect := 
getSubnetDiff(old, new) + return len(removed) > 0 || autoSelect + }), ), + Schema: map[string]*schema.Schema{ "cluster_uuid": { Type: schema.TypeString, @@ -75,10 +84,12 @@ func resourceNcloudNKSNodePool() *schema.Resource { Optional: true, }, "node_pool_name": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateDiagFunc: ToDiagFunc(validation.StringLenBetween(3, 30)), + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateDiagFunc: validation.ToDiagFunc(validation.All( + validation.StringLenBetween(3, 20), + validation.StringMatch(regexp.MustCompile(`^[a-z]+[a-z0-9-]+[a-z0-9]$`), "Allows only lowercase letters(a-z), numbers, hyphen (-). Must start with an alphabetic character, must end with an English letter or number"))), }, "node_count": { Type: schema.TypeInt, @@ -87,7 +98,6 @@ func resourceNcloudNKSNodePool() *schema.Resource { "subnet_no": { Type: schema.TypeString, Optional: true, - Computed: true, ForceNew: true, Deprecated: "use 'subnet_no_list' instead", ConflictsWith: []string{"subnet_no_list"}, @@ -95,15 +105,14 @@ func resourceNcloudNKSNodePool() *schema.Resource { "subnet_no_list": { Type: schema.TypeList, Optional: true, - Computed: true, - ForceNew: true, MaxItems: 5, - MinItems: 0, + MinItems: 1, Elem: &schema.Schema{Type: schema.TypeString}, }, "product_code": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ForceNew: true, }, "software_code": { @@ -112,6 +121,17 @@ func resourceNcloudNKSNodePool() *schema.Resource { Computed: true, ForceNew: true, }, + "storage_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "server_spec_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "autoscale": { Type: schema.TypeList, Optional: true, @@ -134,6 +154,44 @@ func resourceNcloudNKSNodePool() *schema.Resource { }, }, }, + "label": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + 
Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "taint": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effect": { + Type: schema.TypeString, + Required: true, + }, + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "nodes": { Type: schema.TypeList, Computed: true, @@ -189,10 +247,12 @@ func resourceNcloudNKSNodePoolCreate(ctx context.Context, d *schema.ResourceData id := NodePoolCreateResourceID(clusterUuid, nodePoolName) reqParams := &vnks.NodePoolCreationBody{ - Name: ncloud.String(nodePoolName), - NodeCount: Int32PtrOrNil(d.GetOk("node_count")), - ProductCode: StringPtrOrNil(d.GetOk("product_code")), - SoftwareCode: StringPtrOrNil(d.GetOk("software_code")), + Name: ncloud.String(nodePoolName), + NodeCount: Int32PtrOrNil(d.GetOk("node_count")), + ProductCode: StringPtrOrNil(d.GetOk("product_code")), + SoftwareCode: StringPtrOrNil(d.GetOk("software_code")), + ServerSpecCode: StringPtrOrNil(d.GetOk("server_spec_code")), + StorageSize: Int32PtrOrNil(d.GetOk("storage_size")), } if list, ok := d.GetOk("subnet_no_list"); ok { @@ -216,6 +276,63 @@ func resourceNcloudNKSNodePoolCreate(ctx context.Context, d *schema.ResourceData } d.SetId(id) + + if taints, ok := d.GetOk("taint"); ok { + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + + nodePool, err := getNKSNodePool(ctx, config, clusterUuid, nodePoolName) + if err != nil { + return diag.FromErr(err) + } + + instanceNo := strconv.Itoa(int(ncloud.Int32Value(nodePool.InstanceNo))) + + nodePoolTaintReq := &vnks.UpdateNodepoolTaintDto{ + Taints: expandNKSNodePoolTaints(taints), + } + + _, err = 
config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoTaintsPut(ctx, nodePoolTaintReq, &clusterUuid, &instanceNo) + if err != nil { + logErrorResponse("resourceNcloudNKSNodePoolCreate - put taints", err, nodePoolTaintReq) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSNodePoolCreate - put taints", reqParams) + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, ncloud.StringValue(reqParams.Name)); err != nil { + return diag.FromErr(err) + } + } + + if labels, ok := d.GetOk("label"); ok { + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + + nodePool, err := getNKSNodePool(ctx, config, clusterUuid, nodePoolName) + if err != nil { + return diag.FromErr(err) + } + + instanceNo := strconv.Itoa(int(ncloud.Int32Value(nodePool.InstanceNo))) + + labelsReq := &vnks.UpdateNodepoolLabelDto{ + Labels: expandNKSNodePoolLabels(labels), + } + + _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoLabelsPut(ctx, labelsReq, &clusterUuid, &instanceNo) + if err != nil { + logErrorResponse("resourceNcloudNKSNodePoolCreate - put labels", err, labelsReq) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSNodePoolCreate - put labels", reqParams) + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, ncloud.StringValue(reqParams.Name)); err != nil { + return diag.FromErr(err) + } + } + return resourceNcloudNKSNodePoolRead(ctx, d, meta) } @@ -226,6 +343,9 @@ func resourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData, } clusterUuid, nodePoolName, err := NodePoolParseResourceID(d.Id()) + if err != nil { + return diag.FromErr(err) + } nodePool, err := getNKSNodePool(ctx, config, clusterUuid, nodePoolName) if err != nil { return diag.FromErr(err) @@ -243,6 +363,8 @@ func resourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData, d.Set("software_code", nodePool.SoftwareCode) d.Set("node_count", nodePool.NodeCount) 
d.Set("k8s_version", nodePool.K8sVersion) + d.Set("server_spec_code", nodePool.ServerSpecCode) + d.Set("storage_size", nodePool.StorageSize) if err := d.Set("autoscale", flattenNKSNodePoolAutoScale(nodePool.Autoscale)); err != nil { log.Printf("[WARN] Error setting Autoscale set for (%s): %s", d.Id(), err) @@ -252,6 +374,16 @@ func resourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData, if err := d.Set("subnet_no_list", flattenInt32ListToStringList(nodePool.SubnetNoList)); err != nil { log.Printf("[WARN] Error setting subnet no list set for (%s): %s", d.Id(), err) } + } else { + d.Set("subnet_no_list", nil) + } + + if err := d.Set("taint", flattenNKSNodePoolTaints(nodePool.Taints)); err != nil { + log.Printf("[WARN] Error setting taints set for (%s): %s", d.Id(), err) + } + + if err := d.Set("label", flattenNKSNodePoolLabels(nodePool.Labels)); err != nil { + log.Printf("[WARN] Error setting labels set for (%s): %s", d.Id(), err) } nodes, err := getNKSNodePoolWorkerNodes(ctx, config, clusterUuid, nodePoolName) @@ -281,12 +413,6 @@ func resourceNcloudNKSNodePoolUpdate(ctx context.Context, d *schema.ResourceData k8sVersion := StringPtrOrNil(d.GetOk("k8s_version")) if d.HasChanges("k8s_version") { - - if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { - return diag.FromErr(err) - } - - // Nodepool UPGRADE _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoUpgradePatch(ctx, ncloud.String(clusterUuid), instanceNo, k8sVersion, map[string]interface{}{}) if err != nil { logErrorResponse("resourceNcloudNKSNodepoolUpgrade", err, k8sVersion) @@ -300,9 +426,6 @@ func resourceNcloudNKSNodePoolUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChanges("node_count", "autoscale") { - if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { - return diag.FromErr(err) - } reqParams := &vnks.NodePoolUpdateBody{ NodeCount: Int32PtrOrNil(d.GetOk("node_count")), } @@ 
-323,6 +446,61 @@ func resourceNcloudNKSNodePoolUpdate(ctx context.Context, d *schema.ResourceData } } + if d.HasChanges("taint") { + nodePoolTaintReq := &vnks.UpdateNodepoolTaintDto{ + Taints: expandNKSNodePoolTaints(d.Get("taint")), + } + + _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoTaintsPut(ctx, nodePoolTaintReq, &clusterUuid, instanceNo) + if err != nil { + logErrorResponse("resourceNcloudNKSNodePoolUpdate - put taints", err, nodePoolTaintReq) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSNodePoolUpdate - put taints", nodePoolTaintReq) + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("label") { + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + + labelsReq := &vnks.UpdateNodepoolLabelDto{ + Labels: expandNKSNodePoolLabels(d.Get("label")), + } + + _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoLabelsPut(ctx, labelsReq, &clusterUuid, instanceNo) + if err != nil { + logErrorResponse("resourceNcloudNKSNodePoolUpdate - put labels", err, labelsReq) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSNodePoolUpdate - put labels", labelsReq) + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("subnet_no_list") { + + oldList, newList := d.GetChange("subnet_no_list") + added, _, _ := getSubnetDiff(oldList, newList) + + subnetReq := &vnks.UpdateNodepoolSubnetDto{ + Subnets: added, + } + + _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoSubnetsPatch(ctx, subnetReq, &clusterUuid, instanceNo) + if err != nil { + logErrorResponse("resourceNcloudNKSNodePoolUpdate - addSubnets", err, subnetReq) + return diag.FromErr(err) + } + + } + return resourceNcloudNKSNodePoolRead(ctx, d, config) } @@ -390,7 +568,7 @@ func 
waitForNKSNodePoolDeletion(ctx context.Context, d *schema.ResourceData, con func waitForNKSNodePoolActive(ctx context.Context, d *schema.ResourceData, config *ProviderConfig, clusterUuid string, nodePoolName string) error { stateConf := &resource.StateChangeConf{ - Pending: []string{NKSStatusCreatingCode, NKSNodePoolStatusNodeScaleOut, NKSNodePoolStatusNodeScaleDown, NKSNodePoolStatusUpgrade, NKSNodePoolStatusRotateNodeScaleOut, NKSNodePoolStatusRotateNodeScaleDown}, + Pending: []string{NKSStatusCreatingCode, NKSNodePoolStatusNodeScaleOut, NKSNodePoolStatusNodeScaleDown, NKSNodePoolStatusUpgrade, NKSNodePoolStatusRotateNodeScaleOut, NKSNodePoolStatusRotateNodeScaleDown, NKSNodePoolStatusUpdate}, Target: []string{NKSNodePoolStatusRunCode}, Refresh: func() (result interface{}, state string, err error) { np, err := getNKSNodePool(ctx, config, clusterUuid, nodePoolName) @@ -431,6 +609,8 @@ func getNKSNodePools(ctx context.Context, config *ProviderConfig, uuid string) ( if err != nil { return nil, err } + logResponse("getNKSNodePools", resp) + return resp.NodePool, nil } @@ -445,7 +625,7 @@ func NodePoolParseResourceID(id string) (string, string, error) { if len(parts) == 2 && parts[0] != "" && parts[1] != "" { return parts[0], parts[1], nil } - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected cluster-name%[2]snode-pool-name", id, NKSNodePoolIDSeparator) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected cluster-uuid%[2]snode-pool-name", id, NKSNodePoolIDSeparator) } func getNKSWorkerNodes(ctx context.Context, config *ProviderConfig, uuid string) ([]*vnks.WorkerNode, error) { diff --git a/ncloud/resource_ncloud_nks_node_pool_test.go b/ncloud/resource_ncloud_nks_node_pool_test.go index c67c71fec..2b0db5104 100644 --- a/ncloud/resource_ncloud_nks_node_pool_test.go +++ b/ncloud/resource_ncloud_nks_node_pool_test.go @@ -1,23 +1,29 @@ package ncloud import ( + "bytes" "context" "errors" "fmt" + "testing" + 
"github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" - "regexp" - "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccResourceNcloudNKSNodePool_basic(t *testing.T) { - var nodePool vnks.NodePool +func TestAccResourceNcloudNKSNodePool_basic_XEN(t *testing.T) { + validateAcctestEnvironment(t) + clusterName := getTestClusterName() resourceName := "ncloud_nks_node_pool.node_pool" - region, clusterType, productCode, k8sVersion := getRegionAndNKSType() + + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -25,13 +31,8 @@ func TestAccResourceNcloudNKSNodePool_basic(t *testing.T) { CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "node_pool_name", clusterName), - resource.TestCheckResourceAttr(resourceName, "node_count", "1"), - resource.TestCheckResourceAttr(resourceName, "product_code", productCode), - ), + Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 1), + Check: testAccResourceNcloudNKSNodePoolBasicCheck(resourceName, clusterName, nksInfo), }, { ResourceName: resourceName, @@ -42,277 +43,419 @@ func TestAccResourceNcloudNKSNodePool_basic(t *testing.T) { }) } -func TestAccResourceNcloudNKSNodePool_publicNetwork(t *testing.T) { - var nodePool vnks.NodePool - clusterName := getTestClusterName() - resourceName := "ncloud_nks_node_pool.node_pool" - region, clusterType, productCode, k8sVersion := getRegionAndNKSType() +func 
TestAccResourceNcloudNKSNodePool_Update_XEN(t *testing.T) { + validateAcctestEnvironment(t) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSNodePoolConfigPublicNetwork(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "node_pool_name", clusterName), - resource.TestCheckResourceAttr(resourceName, "node_count", "1"), - resource.TestCheckResourceAttr(resourceName, "product_code", productCode), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSNodePool_updateNodeCountAndAutoScale(t *testing.T) { var nodePool vnks.NodePool - clusterName := getTestClusterName() - region, clusterType, productCode, k8sVersion := getRegionAndNKSType() - resourceName := "ncloud_nks_node_pool.node_pool" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckNKSNodePoolDestroy, - Steps: []resource.TestStep{ - { - Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "node_count", "1"), - ), - Destroy: false, - }, - { - Config: testAccResourceNcloudNKSNodePoolUpdateAutoScaleConfig(clusterName, clusterType, productCode, 2, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "node_count", "2"), - resource.TestCheckResourceAttr(resourceName, "autoscale.0.enabled", "true"), - 
resource.TestCheckResourceAttr(resourceName, "autoscale.0.min", "1"), - resource.TestCheckResourceAttr(resourceName, "autoscale.0.max", "2"), - ), - }, - }, - }) -} - -func TestAccResourceNcloudNKSNodePool_upgrade(t *testing.T) { - var nodePool vnks.NodePool - clusterName := "m3-" + getTestClusterName() - region, clusterType, productCode, k8sVersion := getRegionAndNKSType() + clusterName := fmt.Sprintf("m3-%s", getTestClusterName()) resourceName := "ncloud_nks_node_pool.node_pool" + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckNKSNodePoolDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), + Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 1), Check: resource.ComposeTestCheckFunc( testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "node_count", "1"), ), Destroy: false, }, { - Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, "1.25.8-nks.1", region), - Check: resource.ComposeTestCheckFunc( - testAccCheckNKSNodePoolExists(resourceName, &nodePool), - resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), - ), + Config: testAccResourceNcloudNKSNodePoolConfigUpdateAll(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 2), + Check: testAccResourceNcloudNKSNodePoolUpdateAllCheck(resourceName, clusterName, nksInfo), }, }, }) } -func TestAccResourceNcloudNKSNodePool_invalidNodeCount(t *testing.T) { +func TestAccResourceNcloudNKSNodePool_publicNetwork_XEN(t *testing.T) { + validateAcctestEnvironment(t) + clusterName := getTestClusterName() - region, clusterType, productCode, k8sVersion := getRegionAndNKSType() + 
resourceName := "ncloud_nks_node_pool.node_pool" + nksInfo, err := getNKSTestInfo("XEN") + if err != nil { + t.Error(err) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckSubnetDestroy, + CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 0, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), - ExpectError: regexp.MustCompile("nodeCount must not be less than 1"), + Config: testAccResourceNcloudNKSNodePoolConfigPublicNetwork(clusterName, TF_TEST_NKS_LOGIN_KEY, nksInfo, 1), + Check: testAccResourceNcloudNKSNodePoolPublicNetworkCheck(resourceName, clusterName), }, }, }) } -func testAccResourceNcloudNKSNodePoolConfig(name string, clusterType string, productCode string, nodeCount int, loginKey string, version string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" -} - -resource "ncloud_subnet" "subnet" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s" - subnet = "10.2.1.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" -} - -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" -} - +func testAccResourceNcloudNKSNodePoolConfig(name string, loginKeyName string, nksInfo *NKSTestInfo, nodeCount int32) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[6]s" - login_key_name = "%[5]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + 
hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[7]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` +} +`) + + b.WriteString(fmt.Sprintf(` +data "ncloud_nks_server_images" "image"{ + hypervisor_code = ncloud_nks_cluster.cluster.hypervisor_code + filter { + name = "label" + values = ["ubuntu-20.04"] + regex = true + } + +} + +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "%[1]s-1" + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = [ "8GB" ] + } } resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = ncloud_nks_cluster.cluster.uuid - node_pool_name = "%[1]s" - node_count = %[4]d - product_code = "%[3]s" - k8s_version = "%[6]s" - subnet_no_list = [ ncloud_subnet.subnet.id] + node_pool_name = "%[2]s" + node_count = %[3]d + k8s_version = "%[4]s" + subnet_no_list = [ %[5]s ] autoscale { enabled = false - min = 1 - max = 1 + min = 0 + max = 0 } -} -`, name, clusterType, productCode, nodeCount, loginKey, version, region) -} + label { + key = "foo" + value = "bar" + } -func testAccResourceNcloudNKSNodePoolConfigPublicNetwork(name string, clusterType string, productCode string, nodeCount int, loginKey string, version string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" + taint { + key = "foo" + effect = "NoSchedule" + value = "bar" + } 
+ + software_code = data.ncloud_nks_server_images.image.images.0.value +`, nksInfo.Region, name, nodeCount, nksInfo.K8sVersion, *nksInfo.PrivateSubnetList[0].SubnetNo)) + if nksInfo.HypervisorCode == "KVM" { + b.WriteString(` + server_spec_code = data.ncloud_nks_server_products.product.products.0.value + storage_size = 100 } + `) -resource "ncloud_subnet" "subnet" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s" - subnet = "10.2.1.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PUBLIC" - usage_type = "GEN" + } else { + b.WriteString(` + product_code = data.ncloud_nks_server_products.product.products.0.value } + `) + } + return b.String() -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" } +func testAccResourceNcloudNKSNodePoolConfigPublicNetwork(name string, loginKeyName string, nksInfo *NKSTestInfo, nodeCount int32) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[6]s" - login_key_name = "%[5]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[7]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" public_network = true +`, name, nksInfo.ClusterType, nksInfo.K8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PublicSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` +} 
+`) + + b.WriteString(fmt.Sprintf(` + + +data "ncloud_nks_server_images" "image"{ + hypervisor_code = ncloud_nks_cluster.cluster.hypervisor_code + filter { + name = "label" + values = ["ubuntu-20.04"] + regex = true + } + +} + +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "KR-1" + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = [ "8GB" ] + } } resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = ncloud_nks_cluster.cluster.uuid node_pool_name = "%[1]s" - node_count = %[4]d - product_code = "%[3]s" - subnet_no_list = [ ncloud_subnet.subnet.id] + node_count = %[2]d + k8s_version = "%[3]s" + subnet_no_list = [ %[4]s ] autoscale { enabled = false - min = 1 - max = 1 + min = 0 + max = 0 } -} -`, name, clusterType, productCode, nodeCount, loginKey, version, region) -} + label { + key = "foo" + value = "bar" + } + + taint { + key = "foo" + effect = "NoSchedule" + value = "bar" + } -func testAccResourceNcloudNKSNodePoolUpdateAutoScaleConfig(name string, clusterType string, productCode string, nodeCount int, loginKey string, version string, region string) string { - return fmt.Sprintf(` -resource "ncloud_vpc" "vpc" { - name = "%[1]s" - ipv4_cidr_block = "10.2.0.0/16" + software_code = data.ncloud_nks_server_images.image.images.0.value +`, name, nodeCount, nksInfo.K8sVersion, *nksInfo.PublicSubnetList[0].SubnetNo)) + if nksInfo.HypervisorCode == "KVM" { + b.WriteString(` + server_spec_code = data.ncloud_nks_server_products.product.products.0.value + storage_size = 100 } + `) -resource "ncloud_subnet" "subnet" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s" - subnet = "10.2.1.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "GEN" + } else { + b.WriteString(` + product_code = 
data.ncloud_nks_server_products.product.products.0.value } + `) + } + return b.String() -resource "ncloud_subnet" "subnet_lb" { - vpc_no = ncloud_vpc.vpc.vpc_no - name = "%[1]s-lb" - subnet = "10.2.100.0/24" - zone = "%[7]s-1" - network_acl_no = ncloud_vpc.vpc.default_network_acl_no - subnet_type = "PRIVATE" - usage_type = "LOADB" } +func testAccResourceNcloudNKSNodePoolConfigUpdateAll(name string, loginKeyName string, nksInfo *NKSTestInfo, nodeCount int32) string { + var b bytes.Buffer + b.WriteString(fmt.Sprintf(` resource "ncloud_nks_cluster" "cluster" { name = "%[1]s" cluster_type = "%[2]s" - k8s_version = "%[6]s" - login_key_name = "%[5]s" - lb_private_subnet_no = ncloud_subnet.subnet_lb.id + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = %[5]s + hypervisor_code = "%[6]s" + kube_network_plugin = "cilium" subnet_no_list = [ - ncloud_subnet.subnet.id, + %[7]s ] - vpc_no = ncloud_vpc.vpc.vpc_no - zone = "%[7]s-1" + vpc_no = %[8]s + zone = "%[9]s-1" +`, name, nksInfo.ClusterType, nksInfo.UpgradeK8sVersion, loginKeyName, *nksInfo.PrivateLbSubnetList[0].SubnetNo, nksInfo.HypervisorCode, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.Vpc.VpcNo, nksInfo.Region)) + + if nksInfo.needPublicLb { + b.WriteString(fmt.Sprintf(` + lb_public_subnet_no = %[1]s +`, *nksInfo.PublicLbSubnetList[0].SubnetNo)) + } + + b.WriteString(` +} +`) + + b.WriteString(fmt.Sprintf(` + + +data "ncloud_nks_server_images" "image"{ + hypervisor_code = ncloud_nks_cluster.cluster.hypervisor_code + filter { + name = "label" + values = ["ubuntu-20.04"] + regex = true + } + +} + +data "ncloud_nks_server_products" "product"{ + software_code = data.ncloud_nks_server_images.image.images[0].value + zone = "KR-1" + filter { + name = "product_type" + values = [ "STAND"] + } + + filter { + name = "cpu_count" + values = [ "2"] + } + + filter { + name = "memory_size" + values = [ "8GB" ] + } } resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = 
ncloud_nks_cluster.cluster.uuid node_pool_name = "%[1]s" - node_count = %[4]d - product_code = "%[3]s" - subnet_no_list = [ ncloud_subnet.subnet.id] + node_count = %[2]d + k8s_version = "%[3]s" + subnet_no_list = [ %[4]s, %[5]s ] autoscale { enabled = true min = 1 max = 2 } + + label { + key = "bar" + value = "foo" + } + + taint { + key = "bar" + effect = "NoSchedule" + value = "" + } + + software_code = data.ncloud_nks_server_images.image.images.0.value +`, name, nodeCount, nksInfo.UpgradeK8sVersion, *nksInfo.PrivateSubnetList[0].SubnetNo, *nksInfo.PrivateSubnetList[1].SubnetNo)) + if nksInfo.HypervisorCode == "KVM" { + b.WriteString(` + server_spec_code = data.ncloud_nks_server_products.product.products.0.value + storage_size = 100 +} + `) + + } else { + b.WriteString(` + product_code = data.ncloud_nks_server_products.product.products.0.value +} + `) + } + return b.String() + } -`, name, clusterType, productCode, nodeCount, loginKey, version, region) + +func testAccResourceNcloudNKSNodePoolBasicCheck(resourceName string, name string, nksInfo *NKSTestInfo) (check resource.TestCheckFunc) { + var nodePool vnks.NodePool + check = resource.ComposeTestCheckFunc( + testAccCheckNKSNodePoolExists(resourceName, &nodePool), + resource.TestCheckResourceAttr(resourceName, "node_pool_name", name), + resource.TestCheckResourceAttr(resourceName, "node_count", "1"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.min", "0"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.max", "0"), + resource.TestCheckResourceAttr(resourceName, "k8s_version", nksInfo.K8sVersion), + resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "1"), + resource.TestCheckResourceAttr(resourceName, "label.0.key", "foo"), + resource.TestCheckResourceAttr(resourceName, "label.0.value", "bar"), + resource.TestCheckResourceAttr(resourceName, "taint.0.key", "foo"), + 
resource.TestCheckResourceAttr(resourceName, "taint.0.value", "bar"), + resource.TestCheckResourceAttr(resourceName, "taint.0.effect", "NoSchedule"), + ) + + if nksInfo.HypervisorCode == "KVM" { + check = resource.ComposeTestCheckFunc( + check, + resource.TestCheckResourceAttr(resourceName, "storage_size", "100"), + ) + } + return check +} + +func testAccResourceNcloudNKSNodePoolPublicNetworkCheck(resourceName string, name string) (check resource.TestCheckFunc) { + var nodePool vnks.NodePool + return resource.ComposeTestCheckFunc( + testAccCheckNKSNodePoolExists(resourceName, &nodePool), + resource.TestCheckResourceAttr(resourceName, "node_pool_name", name), + resource.TestCheckResourceAttr(resourceName, "node_count", "1"), + ) +} + +func testAccResourceNcloudNKSNodePoolUpdateAllCheck(resourceName string, name string, nksInfo *NKSTestInfo) (check resource.TestCheckFunc) { + var nodePool vnks.NodePool + return resource.ComposeTestCheckFunc( + testAccCheckNKSNodePoolExists(resourceName, &nodePool), + resource.TestCheckResourceAttr(resourceName, "node_pool_name", name), + resource.TestCheckResourceAttr(resourceName, "node_count", "2"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.min", "1"), + resource.TestCheckResourceAttr(resourceName, "autoscale.0.max", "2"), + resource.TestCheckResourceAttr(resourceName, "k8s_version", nksInfo.UpgradeK8sVersion), + resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "2"), + resource.TestCheckResourceAttr(resourceName, "label.0.key", "bar"), + resource.TestCheckResourceAttr(resourceName, "label.0.value", "foo"), + resource.TestCheckResourceAttr(resourceName, "taint.0.key", "bar"), + resource.TestCheckResourceAttr(resourceName, "taint.0.value", ""), + resource.TestCheckResourceAttr(resourceName, "taint.0.effect", "NoSchedule"), + ) } func testAccCheckNKSNodePoolExists(n string, nodePool *vnks.NodePool) resource.TestCheckFunc { 
@@ -382,3 +525,26 @@ func testAccCheckNKSNodePoolDestroy(s *terraform.State) error { return nil } + +func testAccCheckNKSClusterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*ProviderConfig) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ncloud_nks_cluster" { + continue + } + + clusters, err := getNKSClusters(context.Background(), config) + if err != nil { + return err + } + + for _, cluster := range clusters { + if ncloud.StringValue(cluster.Uuid) == rs.Primary.ID { + return fmt.Errorf("Cluster still exists") + } + } + } + + return nil +} diff --git a/ncloud/structures.go b/ncloud/structures.go index ae76ae02b..713232095 100644 --- a/ncloud/structures.go +++ b/ncloud/structures.go @@ -1,8 +1,6 @@ package ncloud import ( - "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "reflect" "strconv" @@ -333,202 +331,6 @@ func expandStringInterfaceListToInt32List(list []interface{}) []*int32 { return res } -func flattenInt32ListToStringList(list []*int32) []*string { - res := make([]*string, 0) - for _, v := range list { - res = append(res, ncloud.IntString(int(ncloud.Int32Value(v)))) - } - return res -} - -func flattenNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logInput T) []map[string]interface{} { - if logInput == nil { - return nil - } - - var audit bool - switch v := any(logInput).(type) { - case *vnks.ClusterLogInput: - audit = ncloud.BoolValue(v.Audit) - case *vnks.AuditLogDto: - audit = ncloud.BoolValue(v.Audit) - default: - return nil - } - - return []map[string]interface{}{ - { - "audit": audit, - }, - } -} -func expandNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logList []interface{}, returnType T) T { - if len(logList) == 0 { - return nil - } - log := logList[0].(map[string]interface{}) - switch any(returnType).(type) { - case *vnks.ClusterLogInput: - return T(&vnks.ClusterLogInput{ - Audit: 
ncloud.Bool(log["audit"].(bool)), - }) - case *vnks.AuditLogDto: - return T(&vnks.AuditLogDto{ - Audit: ncloud.Bool(log["audit"].(bool)), - }) - default: - return nil - } - -} - -func flattenNKSClusterOIDCSpec(oidcSpec *vnks.OidcRes) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - if oidcSpec == nil || !*oidcSpec.Status { - return res - } - - res = []map[string]interface{}{ - { - "issuer_url": ncloud.StringValue(oidcSpec.IssuerURL), - "client_id": ncloud.StringValue(oidcSpec.ClientId), - "username_claim": ncloud.StringValue(oidcSpec.UsernameClaim), - "username_prefix": ncloud.StringValue(oidcSpec.UsernamePrefix), - "groups_claim": ncloud.StringValue(oidcSpec.GroupsClaim), - "groups_prefix": ncloud.StringValue(oidcSpec.GroupsPrefix), - "required_claim": ncloud.StringValue(oidcSpec.RequiredClaim), - }, - } - return res -} - -func expandNKSClusterOIDCSpec(oidc []interface{}) *vnks.UpdateOidcDto { - res := &vnks.UpdateOidcDto{Status: ncloud.Bool(false)} - if len(oidc) == 0 { - return res - } - - oidcSpec := oidc[0].(map[string]interface{}) - if oidcSpec["issuer_url"].(string) != "" && oidcSpec["client_id"].(string) != "" { - res.Status = ncloud.Bool(true) - res.IssuerURL = ncloud.String(oidcSpec["issuer_url"].(string)) - res.ClientId = ncloud.String(oidcSpec["client_id"].(string)) - - usernameClaim, ok := oidcSpec["username_claim"] - if ok { - res.UsernameClaim = ncloud.String(usernameClaim.(string)) - } - usernamePrefix, ok := oidcSpec["username_prefix"] - if ok { - res.UsernamePrefix = ncloud.String(usernamePrefix.(string)) - } - groupsClaim, ok := oidcSpec["groups_claim"] - if ok { - res.GroupsClaim = ncloud.String(groupsClaim.(string)) - } - groupsPrefix, ok := oidcSpec["groups_prefix"] - if ok { - res.GroupsPrefix = ncloud.String(groupsPrefix.(string)) - } - requiredClaims, ok := oidcSpec["required_claim"] - if ok { - res.RequiredClaim = ncloud.String(requiredClaims.(string)) - } - } - - return res -} - -func 
flattenNKSClusterIPAclEntries(ipAcl *vnks.IpAclsRes) *schema.Set { - - ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{}) - - for _, entry := range ipAcl.Entries { - m := map[string]interface{}{ - "action": *entry.Action, - "address": *entry.Address, - } - if entry.Comment != nil { - m["comment"] = *entry.Comment - } - ipAclList.Add(m) - } - - return ipAclList - -} - -func expandNKSClusterIPAcl(acl interface{}) []*vnks.IpAclsEntriesDto { - if acl == nil { - return nil - } - - set := acl.(*schema.Set) - res := make([]*vnks.IpAclsEntriesDto, 0) - for _, raw := range set.List() { - entry := raw.(map[string]interface{}) - - add := &vnks.IpAclsEntriesDto{ - Address: ncloud.String(entry["address"].(string)), - Action: ncloud.String(entry["action"].(string)), - } - if comment, exist := entry["comment"].(string); exist { - add.Comment = ncloud.String(comment) - } - res = append(res, add) - } - - return res -} - -func flattenNKSNodePoolAutoScale(ao *vnks.AutoscaleOption) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - if ao == nil { - return res - } - m := map[string]interface{}{ - "enabled": ncloud.BoolValue(ao.Enabled), - "min": ncloud.Int32Value(ao.Min), - "max": ncloud.Int32Value(ao.Max), - } - res = append(res, m) - return res -} - -func expandNKSNodePoolAutoScale(as []interface{}) *vnks.AutoscalerUpdate { - if len(as) == 0 { - return nil - } - autoScale := as[0].(map[string]interface{}) - return &vnks.AutoscalerUpdate{ - Enabled: ncloud.Bool(autoScale["enabled"].(bool)), - Min: ncloud.Int32(int32(autoScale["min"].(int))), - Max: ncloud.Int32(int32(autoScale["max"].(int))), - } -} - -func flattenNKSWorkerNodes(wns []*vnks.WorkerNode) []map[string]interface{} { - res := make([]map[string]interface{}, 0) - if wns == nil { - return res - } - for _, wn := range wns { - m := map[string]interface{}{ - "name": ncloud.StringValue(wn.Name), - "instance_no": 
ncloud.Int32Value(wn.Id), - "spec": ncloud.StringValue(wn.ServerSpec), - "private_ip": ncloud.StringValue(wn.PrivateIp), - "public_ip": ncloud.StringValue(wn.PublicIp), - "node_status": ncloud.StringValue(wn.K8sStatus), - "container_version": ncloud.StringValue(wn.DockerVersion), - "kernel_version": ncloud.StringValue(wn.KernelVersion), - } - res = append(res, m) - } - - return res -} - func expandSourceBuildEnvVarsParams(eVars []interface{}) ([]*sourcebuild.ProjectEnvEnvVars, error) { envVars := make([]*sourcebuild.ProjectEnvEnvVars, 0, len(eVars)) diff --git a/ncloud/structures_test.go b/ncloud/structures_test.go index b7d8188e0..34899dec8 100644 --- a/ncloud/structures_test.go +++ b/ncloud/structures_test.go @@ -1,8 +1,6 @@ package ncloud import ( - "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "reflect" "testing" @@ -696,310 +694,6 @@ func TestExpandStringInterfaceListToInt32List(t *testing.T) { } } -func TestFlattenInt32ListToStringList(t *testing.T) { - initialList := []*int32{ - ncloud.Int32(int32(1111)), - ncloud.Int32(int32(2222)), - ncloud.Int32(int32(3333)), - } - - stringList := flattenInt32ListToStringList(initialList) - expected := []*string{ - ncloud.String("1111"), - ncloud.String("2222"), - ncloud.String("3333")} - if !reflect.DeepEqual(stringList, expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - stringList, - expected) - } -} - -func TestFlattenNKSClusterLogInput(t *testing.T) { - logInput := &vnks.ClusterLogInput{Audit: ncloud.Bool(true)} - - result := flattenNKSClusterLogInput(logInput) - - if result == nil { - t.Fatal("result was nil") - } - - r := result[0] - if r["audit"].(bool) != true { - t.Fatalf("expected result enabled to be true, but was %v", r["enabled"]) - } -} - -func TestExpandNKSClusterLogInput(t *testing.T) { - log := []interface{}{ - map[string]interface{}{ - "audit": false, - }, - } - - result := expandNKSClusterLogInput(log, 
&vnks.AuditLogDto{}) - - if result == nil { - t.Fatal("result was nil") - } - - if ncloud.BoolValue(result.Audit) != false { - t.Fatalf("expected false , but got %v", ncloud.BoolValue(result.Audit)) - } -} - -func TestFlattenNKSClusterOIDCSpec(t *testing.T) { - oidcSpec := &vnks.OidcRes{ - Status: ncloud.Bool(true), - UsernameClaim: ncloud.String("email"), - UsernamePrefix: ncloud.String("username:"), - IssuerURL: ncloud.String("https://sso.ntruss.com/iss"), - ClientId: ncloud.String("testClient"), - GroupsPrefix: ncloud.String("groups:"), - GroupsClaim: ncloud.String("group"), - RequiredClaim: ncloud.String("iss=https://sso.ntruss.com/iss"), - } - - result := flattenNKSClusterOIDCSpec(oidcSpec) - - if len(result) == 0 { - t.Fatal("empty result") - } - - r := result[0] - - if r["username_claim"].(string) != "email" { - t.Fatalf("expected result username_claim to be 'email', but was %v", r["username_claim"]) - } - - if r["username_prefix"].(string) != "username:" { - t.Fatalf("expected result username_prefix to be 'username:', but was %v", r["username_prefix"]) - } - - if r["issuer_url"].(string) != "https://sso.ntruss.com/iss" { - t.Fatalf("expected result issuer_url to be 'https://sso.ntruss.com/iss', but was %v", r["issuer_url"]) - } - - if r["client_id"].(string) != "testClient" { - t.Fatalf("expected result client_id to be 'testClient', but was %v", r["client_id"]) - } - - if r["groups_claim"].(string) != "group" { - t.Fatalf("expected result groups_claim to be 'group', but was %v", r["groups_claim"]) - } - - if r["groups_prefix"].(string) != "groups:" { - t.Fatalf("expected result groups_prefix to be 'groups:', but was %v", r["groups_prefix"]) - } - - if r["required_claim"].(string) != "iss=https://sso.ntruss.com/iss" { - t.Fatalf("expected result groups_prefix to be 'iss=https://sso.ntruss.com/iss', but was %v", r["required_claim"]) - } -} - -func TestExpandNKSClusterOIDCSpec(t *testing.T) { - oidc := []interface{}{ - map[string]interface{}{ - "issuer_url": 
"https://sso.ntruss.com/iss", - "client_id": "testClient", - "username_claim": "email", - "username_prefix": "username:", - "groups_claim": "group", - "groups_prefix": "groups:", - "required_claim": "iss=https://sso.ntruss.com/iss", - }, - } - - result := expandNKSClusterOIDCSpec(oidc) - - if result == nil { - t.Fatal("result was nil") - } - - expected := &vnks.UpdateOidcDto{ - Status: ncloud.Bool(true), - IssuerURL: ncloud.String("https://sso.ntruss.com/iss"), - ClientId: ncloud.String("testClient"), - UsernameClaim: ncloud.String("email"), - UsernamePrefix: ncloud.String("username:"), - GroupsClaim: ncloud.String("group"), - GroupsPrefix: ncloud.String("groups:"), - RequiredClaim: ncloud.String("iss=https://sso.ntruss.com/iss"), - } - - if reflect.DeepEqual(oidc, expected) != false { - t.Fatalf("expected %v , but got %v", expected, result) - } -} - -func TestFlattenNKSClusterIPAcl(t *testing.T) { - ipAcl := &vnks.IpAclsRes{ - DefaultAction: ncloud.String("deny"), - Entries: []*vnks.IpAclsEntriesRes{ - {Address: ncloud.String("10.0.1.0/24"), - Action: ncloud.String("allow"), - Comment: ncloud.String("master ip"), - }, - }, - } - - result := flattenNKSClusterIPAclEntries(ipAcl) - - if len(result.List()) == 0 { - t.Fatal("empty result") - } - - r := result.List()[0] - rr := r.(map[string]interface{}) - if rr["address"].(string) != "10.0.1.0/24" { - t.Fatalf("expected result address to be '10.0.1.0/24', but was %v", rr["address"]) - } - - if rr["action"].(string) != "allow" { - t.Fatalf("expected result action to be 'allow', but was %v", rr["action"]) - } - - if rr["comment"].(string) != "master ip" { - t.Fatalf("expected result comment to be 'master ip', but was %v", rr["comment"]) - } -} - -func TestExpandNKSClusterIPAcl(t *testing.T) { - ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{}) - - ipAclList.Add(map[string]interface{}{ - "action": "allow", - "address": "10.0.1.0/24", - 
"comment": "master ip", - }) - - result := expandNKSClusterIPAcl(ipAclList) - - if result == nil { - t.Fatal("result was nil") - } - - expected := &vnks.IpAclsEntriesDto{ - Address: ncloud.String("10.0.1.0/24"), - Action: ncloud.String("allow"), - Comment: ncloud.String("maseter ip"), - } - - if reflect.DeepEqual(result, expected) != false { - t.Fatalf("expected %v , but got %v", expected, result) - } -} - -func TestFlattenNKSNodePoolAutoscale(t *testing.T) { - expanded := &vnks.AutoscaleOption{ - Enabled: ncloud.Bool(true), - Max: ncloud.Int32(2), - Min: ncloud.Int32(2), - } - - result := flattenNKSNodePoolAutoScale(expanded) - - if result == nil { - t.Fatal("result was nil") - } - - r := result[0] - if r["enabled"].(bool) != true { - t.Fatalf("expected result enabled to be true, but was %v", r["enabled"]) - } - - if r["min"].(int32) != 2 { - t.Fatalf("expected result min to be 2, but was %d", r["min"]) - } - - if r["max"].(int32) != 2 { - t.Fatalf("expected result max to be 2, but was %d", r["max"]) - } -} - -func TestFlattenNKSWorkerNodes(t *testing.T) { - expanded := []*vnks.WorkerNode{ - { - Id: ncloud.Int32(1), - Name: ncloud.String("node1"), - ServerSpec: ncloud.String("[Standard] vCPU 2EA, Memory 8GB"), - PrivateIp: ncloud.String("10.0.1.4"), - PublicIp: ncloud.String(""), - K8sStatus: ncloud.String("Ready"), - DockerVersion: ncloud.String("containerd://1.3.7"), - KernelVersion: ncloud.String("5.4.0-65-generic"), - }, - } - - result := flattenNKSWorkerNodes(expanded) - - if result == nil { - t.Fatal("result was nil") - } - - r := result[0] - if r["instance_no"].(int32) != 1 { - t.Fatalf("expected result instance_no to be 1, but was %v", r["instance_no"]) - } - - if r["name"].(string) != "node1" { - t.Fatalf("expected result name to be node1, but was %s", r["name"]) - } - - if r["spec"].(string) != "[Standard] vCPU 2EA, Memory 8GB" { - t.Fatalf("expected result spec to be [Standard] vCPU 2EA, Memory 8GB, but was %s", r["spec"]) - } - - if 
r["private_ip"].(string) != "10.0.1.4" { - t.Fatalf("expected result private_ip to be 10.0.1.4, but was %s", r["private_ip"]) - } - - if r["public_ip"].(string) != "" { - t.Fatalf("expected result public_ip to be emtpy, but was %s", r["public_ip"]) - } - - if r["node_status"].(string) != "Ready" { - t.Fatalf("expected result node_status to be Ready, but was %s", r["node_status"]) - } - - if r["container_version"].(string) != "containerd://1.3.7" { - t.Fatalf("expected result container_version to be containerd://1.3.7, but was %s", r["container_version"]) - } - - if r["kernel_version"].(string) != "5.4.0-65-generic" { - t.Fatalf("expected result kernel_version to be 5.4.0-65-generic, but was %s", r["kernel_version"]) - } -} - -func TestExpandNKSNodePoolAutoScale(t *testing.T) { - autoscaleList := []interface{}{ - map[string]interface{}{ - "enabled": true, - "min": 2, - "max": 2, - }, - } - - result := expandNKSNodePoolAutoScale(autoscaleList) - - if result == nil { - t.Fatal("result was nil") - } - - if ncloud.BoolValue(result.Enabled) != true { - t.Fatalf("expected result true, but got %v", ncloud.BoolValue(result.Enabled)) - } - - if ncloud.Int32Value(result.Min) != int32(2) { - t.Fatalf("expected result 2, but got %d", ncloud.Int32Value(result.Min)) - } - - if ncloud.Int32Value(result.Max) != int32(2) { - t.Fatalf("expected result 2, but got %d", ncloud.Int32Value(result.Max)) - } -} - func TestExpandSourceBuildEnvVarsParams(t *testing.T) { envVars := []interface{}{ map[string]interface{}{