diff --git a/docs/resources/dw_aws_cluster.md b/docs/resources/dw_aws_cluster.md
index a678f4fc..ded1de43 100644
--- a/docs/resources/dw_aws_cluster.md
+++ b/docs/resources/dw_aws_cluster.md
@@ -83,10 +83,12 @@ output "name" {
### Read-Only
- `cluster_id` (String) The id of the cluster.
+- `default_database_catalog` (Attributes) (see [below for nested schema](#nestedatt--default_database_catalog))
- `id` (String) The ID of this resource.
- `last_updated` (String) Timestamp of the last Terraform update of the order.
- `name` (String) The name of the cluster matches the environment name.
- `status` (String) The status of the cluster.
+- `version` (String) The version of the cluster.
### Nested Schema for `network_settings`
@@ -119,7 +121,6 @@ Required:
Optional:
-- `additional_instance_types` (List of String) The additional instance types that the environment is allowed to use, listed in their priority order. They will be used instead of the primary compute instance type in case it is unavailable. You cannot include any instance type that was already indicated in computeInstanceTypes.
- `compute_instance_types` (List of String) The compute instance types that the environment is restricted to use. This affects the creation of virtual warehouses where this restriction will apply. Select an instance type that meets your computing, memory, networking, or storage needs. As of now, only a single instance type can be listed.
- `custom_ami_id` (String) The custom AMI ID to use for worker nodes.
- `enable_spot_instances` (Boolean) Whether to use spot instances for worker nodes.
@@ -133,3 +134,14 @@ Optional:
- `async` (Boolean) Boolean value that specifies if Terraform should wait for resource creation/deletion.
- `call_failure_threshold` (Number) Threshold value that specifies how many times should a single call failure happen before giving up the polling.
- `polling_timeout` (Number) Timeout value in minutes that specifies for how long should the polling go for resource creation/deletion.
+
+
+
+### Nested Schema for `default_database_catalog`
+
+Read-Only:
+
+- `id` (String) The ID of the database catalog.
+- `last_updated` (String) Timestamp of the last Terraform update of the order.
+- `name` (String) The name of the database catalog.
+- `status` (String) The status of the database catalog.
diff --git a/resources/dw/cluster/aws/model_cluster.go b/resources/dw/cluster/aws/model_cluster.go
index 5e4f7831..bb76ace5 100644
--- a/resources/dw/cluster/aws/model_cluster.go
+++ b/resources/dw/cluster/aws/model_cluster.go
@@ -11,9 +11,13 @@
package aws
import (
+ "context"
"time"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/cloudera/terraform-provider-cdp/cdp-sdk-go/gen/dw/models"
"github.com/cloudera/terraform-provider-cdp/utils"
@@ -35,10 +39,9 @@ type customRegistryOptions struct {
}
type instanceResourceModel struct {
- CustomAmiID types.String `tfsdk:"custom_ami_id"`
- EnableSpotInstances types.Bool `tfsdk:"enable_spot_instances"`
- ComputeInstanceTypes types.List `tfsdk:"compute_instance_types"`
- AdditionalInstanceTypes types.List `tfsdk:"additional_instance_types"`
+ CustomAmiID types.String `tfsdk:"custom_ami_id"`
+ EnableSpotInstances types.Bool `tfsdk:"enable_spot_instances"`
+ ComputeInstanceTypes types.List `tfsdk:"compute_instance_types"`
}
type resourceModel struct {
@@ -48,16 +51,31 @@ type resourceModel struct {
ClusterID types.String `tfsdk:"cluster_id"`
LastUpdated types.String `tfsdk:"last_updated"`
Status types.String `tfsdk:"status"`
+ Version types.String `tfsdk:"version"`
NodeRoleCDWManagedPolicyArn types.String `tfsdk:"node_role_cdw_managed_policy_arn"`
DatabaseBackupRetentionDays types.Int64 `tfsdk:"database_backup_retention_days"`
CustomRegistryOptions *customRegistryOptions `tfsdk:"custom_registry_options"`
CustomSubdomain types.String `tfsdk:"custom_subdomain"`
NetworkSettings *networkResourceModel `tfsdk:"network_settings"`
- InstanceSettings *instanceResourceModel `tfsdk:"instance_settings"`
+ InstanceSettings types.Object `tfsdk:"instance_settings"`
+ DefaultDatabaseCatalog types.Object `tfsdk:"default_database_catalog"`
PollingOptions *utils.PollingOptions `tfsdk:"polling_options"`
}
-func (p *resourceModel) convertToCreateAwsClusterRequest() *models.CreateAwsClusterRequest {
+func (p *resourceModel) convertToCreateAwsClusterRequest(ctx context.Context) (*models.CreateAwsClusterRequest, diag.Diagnostics) {
+ enableSpotInstances, diags := p.getEnableSpotInstances(ctx)
+ if diags.HasError() {
+ return nil, diags
+ }
+ customAmiID, diags := p.getCustomAmiID(ctx)
+ if diags.HasError() {
+ return nil, diags
+ }
+ computeInstanceTypes, diags := p.getComputeInstanceTypes(ctx)
+ if diags.HasError() {
+ return nil, diags
+ }
+
return &models.CreateAwsClusterRequest{
EnvironmentCrn: p.Crn.ValueStringPointer(),
UseOverlayNetwork: p.NetworkSettings.UseOverlayNetwork.ValueBool(),
@@ -71,31 +89,28 @@ func (p *resourceModel) convertToCreateAwsClusterRequest() *models.CreateAwsClus
DatabaseBackupRetentionPeriod: utils.Int64To32Pointer(p.DatabaseBackupRetentionDays),
CustomSubdomain: p.CustomSubdomain.ValueString(),
CustomRegistryOptions: p.getCustomRegistryOptions(),
- EnableSpotInstances: p.getEnableSpotInstances(),
- CustomAmiID: p.getCustomAmiID(),
- ComputeInstanceTypes: p.getComputeInstanceTypes(),
- }
+ EnableSpotInstances: enableSpotInstances,
+ CustomAmiID: customAmiID,
+ ComputeInstanceTypes: computeInstanceTypes,
+ }, diags
}
-func (p *resourceModel) getEnableSpotInstances() *bool {
- if i := p.InstanceSettings; i != nil {
- return i.EnableSpotInstances.ValueBoolPointer()
- }
- return nil
+func (p *resourceModel) getEnableSpotInstances(ctx context.Context) (*bool, diag.Diagnostics) {
+ var irm instanceResourceModel
+ diags := p.InstanceSettings.As(ctx, &irm, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
+ return irm.EnableSpotInstances.ValueBoolPointer(), diags
}
-func (p *resourceModel) getCustomAmiID() string {
- if i := p.InstanceSettings; i != nil {
- return p.InstanceSettings.CustomAmiID.ValueString()
- }
- return ""
+func (p *resourceModel) getCustomAmiID(ctx context.Context) (string, diag.Diagnostics) {
+ var irm instanceResourceModel
+ diags := p.InstanceSettings.As(ctx, &irm, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
+ return irm.CustomAmiID.ValueString(), diags
}
-func (p *resourceModel) getComputeInstanceTypes() []string {
- if i := p.InstanceSettings; i != nil {
- return utils.FromListValueToStringList(p.InstanceSettings.ComputeInstanceTypes)
- }
- return nil
+func (p *resourceModel) getComputeInstanceTypes(ctx context.Context) ([]string, diag.Diagnostics) {
+ var irm instanceResourceModel
+ diags := p.InstanceSettings.As(ctx, &irm, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
+ return utils.FromListValueToStringList(irm.ComputeInstanceTypes), diags
}
func (p *resourceModel) getCustomRegistryOptions() *models.CustomRegistryOptions {
@@ -108,16 +123,50 @@ func (p *resourceModel) getCustomRegistryOptions() *models.CustomRegistryOptions
return nil
}
-func (p *resourceModel) getPollingTimeout() time.Duration {
- if p.PollingOptions != nil {
- return time.Duration(p.PollingOptions.PollingTimeout.ValueInt64()) * time.Minute
+func (p *resourceModel) GetPollingOptions() *utils.PollingOptions {
+ return p.PollingOptions
+}
+
+func (p *resourceModel) setResourceModel(ctx context.Context, resp *models.DescribeClusterResponse) diag.Diagnostics {
+ p.ID = types.StringValue(resp.Cluster.EnvironmentCrn)
+ p.Crn = types.StringValue(resp.Cluster.EnvironmentCrn)
+ p.Name = types.StringValue(resp.Cluster.Name)
+ p.Status = types.StringValue(resp.Cluster.Status)
+ p.Version = types.StringValue(resp.Cluster.Version)
+ p.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
+ var irm instanceResourceModel
+ diags := p.InstanceSettings.As(ctx, &irm, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true, UnhandledUnknownAsEmpty: true})
+ if diags.HasError() {
+ return diags
}
- return 40 * time.Minute
+ attributeTypes := map[string]attr.Type{
+ "custom_ami_id": types.StringType,
+ "enable_spot_instances": types.BoolType,
+ "compute_instance_types": types.ListType{ElemType: types.StringType},
+ }
+ attributes := map[string]attr.Value{
+ "custom_ami_id": irm.CustomAmiID,
+ "enable_spot_instances": basetypes.NewBoolValue(resp.Cluster.EnableSpotInstances),
+ "compute_instance_types": utils.FromStringListToListValue(resp.Cluster.ComputeInstanceTypes),
+ }
+ p.InstanceSettings, diags = basetypes.NewObjectValue(attributeTypes, attributes)
+ return diags
}
-func (p *resourceModel) getCallFailureThreshold() int {
- if p.PollingOptions != nil {
- return int(p.PollingOptions.CallFailureThreshold.ValueInt64())
+func (p *resourceModel) setDefaultDatabaseCatalog(catalog *models.DbcSummary) diag.Diagnostics {
+ attributeTypes := map[string]attr.Type{
+ "id": types.StringType,
+ "name": types.StringType,
+ "last_updated": types.StringType,
+ "status": types.StringType,
+ }
+ attributes := map[string]attr.Value{
+ "id": basetypes.NewStringValue(catalog.ID),
+ "name": basetypes.NewStringValue(catalog.Name),
+ "last_updated": basetypes.NewStringValue(time.Now().Format(time.RFC850)),
+ "status": basetypes.NewStringValue(catalog.Status),
}
- return 3
+ dbc, diags := basetypes.NewObjectValue(attributeTypes, attributes)
+ p.DefaultDatabaseCatalog = dbc
+ return diags
}
diff --git a/resources/dw/cluster/aws/model_cluster_test.go b/resources/dw/cluster/aws/model_cluster_test.go
index e0ef71a0..8c4251fe 100644
--- a/resources/dw/cluster/aws/model_cluster_test.go
+++ b/resources/dw/cluster/aws/model_cluster_test.go
@@ -13,7 +13,6 @@ package aws
import (
"context"
"testing"
- "time"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/tfsdk"
@@ -44,7 +43,7 @@ func (s *DwClusterModelTestSuite) SetupSuite() {
}
func (s *DwClusterModelTestSuite) TestConvertToCreateAwsClusterRequest() {
- awsCluster := s.rm.convertToCreateAwsClusterRequest()
+ awsCluster, _ := s.rm.convertToCreateAwsClusterRequest(context.TODO())
s.Equal("crn", *awsCluster.EnvironmentCrn)
s.Equal(true, awsCluster.UseOverlayNetwork)
s.Equal([]string{"cidr-1", "cidr-2", "cidr-3"}, awsCluster.WhitelistK8sClusterAccessIPCIDRs)
@@ -61,13 +60,3 @@ func (s *DwClusterModelTestSuite) TestConvertToCreateAwsClusterRequest() {
s.Equal("", awsCluster.CustomAmiID)
s.Equal([]string{}, awsCluster.ComputeInstanceTypes)
}
-
-func (s *DwClusterModelTestSuite) TestGetPollingTimeout() {
- timeout := s.rm.getPollingTimeout()
- s.Equal(90*time.Minute, timeout)
-}
-
-func (s *DwClusterModelTestSuite) TestGetCallFailureThreshold() {
- out := s.rm.getCallFailureThreshold()
- s.Equal(3, out)
-}
diff --git a/resources/dw/cluster/aws/resource_cluster.go b/resources/dw/cluster/aws/resource_cluster.go
index ad237d9a..b7400603 100644
--- a/resources/dw/cluster/aws/resource_cluster.go
+++ b/resources/dw/cluster/aws/resource_cluster.go
@@ -16,6 +16,7 @@ import (
"strings"
"time"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
@@ -67,26 +68,16 @@ func (r *dwClusterResource) Create(ctx context.Context, req resource.CreateReque
return
}
- // Generate API request body from plan
- clusterParams := operations.NewCreateAwsClusterParamsWithContext(ctx).
- WithInput(plan.convertToCreateAwsClusterRequest())
-
- // Create new AWS cluster
- response, err := r.client.Dw.Operations.CreateAwsCluster(clusterParams)
- if err != nil {
- resp.Diagnostics.AddError(
- "Error creating Data Warehouse AWS cluster",
- "Could not create cluster, unexpected error: "+err.Error(),
- )
+ response, diags := r.createCluster(ctx, plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
return
}
payload := response.GetPayload()
clusterID := &payload.ClusterID
plan.ClusterID = types.StringValue(*clusterID)
- desc := operations.NewDescribeClusterParamsWithContext(ctx).
- WithInput(&models.DescribeClusterRequest{ClusterID: clusterID})
- describe, err := r.client.Dw.Operations.DescribeCluster(desc)
+ describe, err := r.describeCluster(ctx, clusterID)
if err != nil {
resp.Diagnostics.AddError(
"Error creating Data Warehouse AWS cluster",
@@ -94,50 +85,154 @@ func (r *dwClusterResource) Create(ctx context.Context, req resource.CreateReque
)
return
}
-
- cluster := describe.GetPayload()
-
- // Map response body to schema and populate Computed attribute values
- plan.ID = types.StringValue(cluster.Cluster.EnvironmentCrn)
- plan.Crn = types.StringValue(cluster.Cluster.EnvironmentCrn)
- plan.Name = types.StringValue(cluster.Cluster.Name)
- plan.Status = types.StringValue(cluster.Cluster.Status)
- plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
-
- // Set state to fully populated data
+	resp.Diagnostics.Append(plan.setResourceModel(ctx, describe.GetPayload())...)
diags = resp.State.Set(ctx, plan)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
- if !(plan.PollingOptions != nil && plan.PollingOptions.Async.ValueBool()) {
- callFailedCount := 0
- stateConf := &retry.StateChangeConf{
- Pending: []string{"Accepted", "Creating", "Created", "Starting"},
- Target: []string{"Running"},
- Delay: 30 * time.Second,
- Timeout: plan.getPollingTimeout(),
- PollInterval: 30 * time.Second,
- Refresh: r.stateRefresh(ctx, clusterID, &callFailedCount, plan.getCallFailureThreshold()),
+	if opts := plan.PollingOptions; opts == nil || !opts.Async.ValueBool() {
+ cluster, diags := r.waitForClusterCreation(ctx, &plan, clusterID)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
}
- if _, err = stateConf.WaitForStateContext(ctx); err != nil {
- resp.Diagnostics.AddError(
- "Error waiting for Data Warehouse AWS cluster",
- "Could not create cluster, unexpected error: "+err.Error(),
- )
+		resp.Diagnostics.Append(plan.setResourceModel(ctx, cluster)...)
+ diags = resp.State.Set(ctx, plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ catalog, diags := r.waitForDefaultDatabaseCatalogCreation(ctx, &plan, clusterID)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ diags = plan.setDefaultDatabaseCatalog(catalog)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
return
}
- plan.Status = types.StringValue(cluster.Cluster.Status)
- plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850))
}
diags = resp.State.Set(ctx, plan)
resp.Diagnostics.Append(diags...)
}
+func (r *dwClusterResource) waitForClusterCreation(ctx context.Context, plan *resourceModel, clusterID *string) (*models.DescribeClusterResponse, diag.Diagnostics) {
+ diags := diag.Diagnostics{}
+ if _, err := retryStateConf(ctx, plan, setupRetryCfg(clusterID), r.clusterStateRefresh).WaitForStateContext(ctx); err != nil {
+ diags.AddError(
+ "Error waiting for Data Warehouse AWS cluster",
+ "Could not create cluster, unexpected error: "+err.Error(),
+ )
+ return nil, diags
+ }
+
+ describe, err := r.describeCluster(ctx, clusterID)
+ if err != nil {
+ diags.AddError(
+ "Error creating Data Warehouse AWS cluster",
+ "Could not describe cluster, unexpected error: "+err.Error(),
+ )
+ return nil, diags
+ }
+
+ return describe.GetPayload(), diags
+}
+
+func (r *dwClusterResource) waitForDefaultDatabaseCatalogCreation(ctx context.Context, plan *resourceModel, clusterID *string) (*models.DbcSummary, diag.Diagnostics) {
+ diags := diag.Diagnostics{}
+ if _, err := retryStateConf(ctx, plan, setupRetryCfg(clusterID), r.databaseCatalogStateRefresh).WaitForStateContext(ctx); err != nil {
+ diags.AddError(
+ "Error waiting for Data Warehouse database catalog",
+ fmt.Sprintf("Could not create database catalog, unexpected error: %v", err),
+ )
+ return nil, diags
+ }
+ catalog, err := r.getDatabaseCatalog(ctx, clusterID)
+ if err != nil {
+ diags.AddError(
+ "Error finding Data Warehouse database catalog", fmt.Sprintf("unexpected error: %v", err),
+ )
+ return nil, diags
+ }
+
+ return catalog, diags
+}
+
+func (r *dwClusterResource) createCluster(ctx context.Context, plan resourceModel) (*operations.CreateAwsClusterOK, diag.Diagnostics) {
+ // Generate API request body from plan
+ req, diags := plan.convertToCreateAwsClusterRequest(ctx)
+ if diags.HasError() {
+ return nil, diags
+ }
+ clusterParams := operations.NewCreateAwsClusterParamsWithContext(ctx).
+ WithInput(req)
+
+ // Create new AWS cluster
+ response, err := r.client.Dw.Operations.CreateAwsCluster(clusterParams)
+ if err != nil {
+ diags.AddError("Error creating Data Warehouse AWS cluster", "Could not create cluster, unexpected error: "+err.Error())
+ }
+ return response, diags
+}
+
+func (r *dwClusterResource) describeCluster(ctx context.Context, clusterID *string) (*operations.DescribeClusterOK, error) {
+ desc := operations.NewDescribeClusterParamsWithContext(ctx).
+ WithInput(&models.DescribeClusterRequest{ClusterID: clusterID})
+ describe, err := r.client.Dw.Operations.DescribeCluster(desc)
+ return describe, err
+}
+
+func (r *dwClusterResource) deleteCluster(ctx context.Context, clusterID *string) (*operations.DeleteClusterOK, error) {
+ op := operations.NewDeleteClusterParamsWithContext(ctx).
+ WithInput(&models.DeleteClusterRequest{
+ ClusterID: clusterID,
+ })
+ resp, err := r.client.Dw.Operations.DeleteCluster(op)
+ return resp, err
+}
+
+func (r *dwClusterResource) getDatabaseCatalog(ctx context.Context, clusterID *string) (*models.DbcSummary, error) {
+ response, err := r.listDatabaseCatalogs(ctx, clusterID)
+ if err != nil {
+ err = fmt.Errorf("could not list database catalogs, unexpected error: %s", err.Error())
+ return nil, err
+ }
+ resp := response.GetPayload()
+ if len(resp.Dbcs) != 1 {
+ err = fmt.Errorf("exactly one Data Warehouse database catalog should be deployed for cluster %s", *clusterID)
+ return nil, err
+ }
+ return resp.Dbcs[0], nil
+}
+
func (r *dwClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
- tflog.Warn(ctx, "Read operation is not implemented yet.")
+ var state resourceModel
+ diags := req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if diags.HasError() {
+ return
+ }
+
+ describe, err := r.describeCluster(ctx, state.ClusterID.ValueStringPointer())
+ if err != nil {
+ resp.Diagnostics.AddError(
+			"Error reading Data Warehouse AWS cluster",
+ "Could not describe cluster, unexpected error: "+err.Error(),
+ )
+ return
+ }
+ diags = state.setResourceModel(ctx, describe.GetPayload())
+ resp.Diagnostics.Append(diags...)
+ if diags.HasError() {
+ return
+ }
+ diags = resp.State.Set(ctx, state)
+ resp.Diagnostics.Append(diags...)
}
func (r *dwClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
@@ -155,12 +250,7 @@ func (r *dwClusterResource) Delete(ctx context.Context, req resource.DeleteReque
}
clusterID := state.ClusterID.ValueStringPointer()
- op := operations.NewDeleteClusterParamsWithContext(ctx).
- WithInput(&models.DeleteClusterRequest{
- ClusterID: clusterID,
- })
-
- if _, err := r.client.Dw.Operations.DeleteCluster(op); err != nil {
+ if _, err := r.deleteCluster(ctx, clusterID); err != nil {
resp.Diagnostics.AddError(
"Error deleting Data Warehouse AWS cluster",
"Could not delete cluster, unexpected error: "+err.Error(),
@@ -168,17 +258,8 @@ func (r *dwClusterResource) Delete(ctx context.Context, req resource.DeleteReque
return
}
- if !(state.PollingOptions != nil && state.PollingOptions.Async.ValueBool()) {
- callFailedCount := 0
- stateConf := &retry.StateChangeConf{
- Pending: []string{"Deleting", "Running"},
- Target: []string{"Deleted"}, // This is not an actual state, we added it to fake the state change
- Delay: 30 * time.Second,
- Timeout: state.getPollingTimeout(),
- PollInterval: 30 * time.Second,
- Refresh: r.stateRefresh(ctx, clusterID, &callFailedCount, state.getCallFailureThreshold()),
- }
- if _, err := stateConf.WaitForStateContext(ctx); err != nil {
+	if opts := state.PollingOptions; opts == nil || !opts.Async.ValueBool() {
+ if _, err := retryStateConf(ctx, &state, teardownRetryCfg(clusterID), r.clusterStateRefresh).WaitForStateContext(ctx); err != nil {
resp.Diagnostics.AddError(
"Error waiting for Data Warehouse AWS cluster",
"Could not delete cluster, unexpected error: "+err.Error(),
@@ -188,12 +269,54 @@ func (r *dwClusterResource) Delete(ctx context.Context, req resource.DeleteReque
}
}
-func (r *dwClusterResource) stateRefresh(ctx context.Context, clusterID *string, callFailedCount *int, callFailureThreshold int) func() (any, string, error) {
+type retryStateCfg struct {
+ clusterID *string
+ pending []string
+ target []string
+}
+
+func setupRetryCfg(clusterID *string) *retryStateCfg {
+ return &retryStateCfg{
+ clusterID: clusterID,
+ pending: []string{"Accepted", "Creating", "Created", "Loading", "Starting"},
+ target: []string{"Running"},
+ }
+}
+
+func teardownRetryCfg(clusterID *string) *retryStateCfg {
+ return &retryStateCfg{
+ clusterID: clusterID,
+ pending: []string{"Deleting", "Running", "Stopping", "Stopped", "Creating", "Created", "Starting", "Updating"},
+ target: []string{"Deleted"},
+ }
+}
+
+type stateRefresherFunc func(
+ ctx context.Context,
+ clusterID *string,
+ callFailedCount *int,
+ callFailureThreshold int) func() (any, string, error)
+
+func retryStateConf(
+ ctx context.Context,
+ po utils.HasPollingOptions,
+ status *retryStateCfg,
+ stateRefresher stateRefresherFunc) *retry.StateChangeConf {
+ callFailedCount := 0
+ return &retry.StateChangeConf{
+ Pending: status.pending,
+ Target: status.target, // Deleted is not an actual state, we added it to fake the state change
+ Delay: 30 * time.Second,
+ Timeout: utils.GetPollingTimeout(po, 40*time.Minute),
+ PollInterval: 30 * time.Second,
+ Refresh: stateRefresher(ctx, status.clusterID, &callFailedCount, utils.GetCallFailureThreshold(po, 3)),
+ }
+}
+
+func (r *dwClusterResource) clusterStateRefresh(ctx context.Context, clusterID *string, callFailedCount *int, callFailureThreshold int) func() (any, string, error) {
return func() (any, string, error) {
tflog.Debug(ctx, "About to describe cluster")
- params := operations.NewDescribeClusterParamsWithContext(ctx).
- WithInput(&models.DescribeClusterRequest{ClusterID: clusterID})
- resp, err := r.client.Dw.Operations.DescribeCluster(params)
+ resp, err := r.describeCluster(ctx, clusterID)
if err != nil {
if strings.Contains(err.Error(), "NOT_FOUND") {
return &models.DescribeClusterResponse{}, "Deleted", nil
@@ -214,3 +337,43 @@ func (r *dwClusterResource) stateRefresh(ctx context.Context, clusterID *string,
return cluster, cluster.Cluster.Status, nil
}
}
+
+func (r *dwClusterResource) databaseCatalogStateRefresh(ctx context.Context, clusterID *string, callFailedCount *int, callFailureThreshold int) func() (any, string, error) {
+ return func() (any, string, error) {
+ tflog.Debug(ctx, "About to get DBCs")
+ response, err := r.listDatabaseCatalogs(ctx, clusterID)
+ if err != nil {
+ tflog.Error(ctx,
+ fmt.Sprintf("could not list database catalogs, unexpected error: %s", err.Error()),
+ )
+ return nil, "", err
+ }
+ resp := response.GetPayload()
+ if len(resp.Dbcs) == 0 {
+ *callFailedCount++
+ if *callFailedCount <= callFailureThreshold {
+				tflog.Warn(ctx, fmt.Sprintf("could not find Data Warehouse database catalog "+
+					"yet, but threshold limit is not reached (%d out of %d).", *callFailedCount, callFailureThreshold))
+ return nil, "", nil
+ }
+			err = fmt.Errorf("no Data Warehouse database catalog found for cluster %s; failure threshold limit exceeded", *clusterID)
+			tflog.Error(ctx, err.Error())
+			return nil, "", err
+ }
+ if len(resp.Dbcs) > 1 {
+ err = fmt.Errorf("found more than one Data Warehouse database catalog for cluster %s", *clusterID)
+ tflog.Error(ctx, fmt.Sprintf("error describing Data Warehouse database catalog due to [%s] ", err.Error()))
+ return nil, "", err
+ }
+ *callFailedCount = 0
+
+ tflog.Debug(ctx, fmt.Sprintf("Found database catalog %s with status %s", resp.Dbcs[0].ID, resp.Dbcs[0].Status))
+ return resp.Dbcs[0], resp.Dbcs[0].Status, nil
+ }
+}
+
+func (r *dwClusterResource) listDatabaseCatalogs(ctx context.Context, clusterID *string) (*operations.ListDbcsOK, error) {
+ catalogParams := operations.NewListDbcsParamsWithContext(ctx).WithInput(&models.ListDbcsRequest{ClusterID: clusterID})
+ response, err := r.client.Dw.Operations.ListDbcs(catalogParams)
+ return response, err
+}
diff --git a/resources/dw/cluster/aws/resource_cluster_test.go b/resources/dw/cluster/aws/resource_cluster_test.go
index b342d731..4168c205 100644
--- a/resources/dw/cluster/aws/resource_cluster_test.go
+++ b/resources/dw/cluster/aws/resource_cluster_test.go
@@ -68,6 +68,10 @@ var testDwClusterSchema = schema.Schema{
Computed: true,
MarkdownDescription: "The status of the cluster.",
},
+ "version": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The version of the cluster.",
+ },
"node_role_cdw_managed_policy_arn": schema.StringAttribute{
Optional: true,
MarkdownDescription: "The managed policy ARN to be attached to the created node instance role.",
@@ -132,6 +136,7 @@ var testDwClusterSchema = schema.Schema{
},
"instance_settings": schema.SingleNestedAttribute{
Optional: true,
+ Computed: true,
Attributes: map[string]schema.Attribute{
"custom_ami_id": schema.StringAttribute{
Optional: true,
@@ -145,13 +150,30 @@ var testDwClusterSchema = schema.Schema{
},
"compute_instance_types": schema.ListAttribute{
Optional: true,
+ Computed: true,
ElementType: types.StringType,
MarkdownDescription: "The compute instance types that the environment is restricted to use. This affects the creation of virtual warehouses where this restriction will apply. Select an instance type that meets your computing, memory, networking, or storage needs. As of now, only a single instance type can be listed.",
},
- "additional_instance_types": schema.ListAttribute{
- Optional: true,
- ElementType: types.StringType,
- MarkdownDescription: "The additional instance types that the environment is allowed to use, listed in their priority order. They will be used instead of the primary compute instance type in case it is unavailable. You cannot include any instance type that was already indicated in computeInstanceTypes.",
+ },
+ },
+ "default_database_catalog": schema.SingleNestedAttribute{
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The ID of the database catalog.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The name of the database catalog.",
+ },
+ "last_updated": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Timestamp of the last Terraform update of the order.",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The status of the database catalog.",
},
},
},
@@ -208,6 +230,7 @@ func createRawClusterResource() tftypes.Value {
"cluster_id": tftypes.String,
"last_updated": tftypes.String,
"status": tftypes.String,
+ "version": tftypes.String,
"node_role_cdw_managed_policy_arn": tftypes.String,
"database_backup_retention_days": tftypes.Number,
"custom_registry_options": tftypes.Object{
@@ -230,10 +253,17 @@ func createRawClusterResource() tftypes.Value {
},
"instance_settings": tftypes.Object{
AttributeTypes: map[string]tftypes.Type{
- "custom_ami_id": tftypes.String,
- "enable_spot_instances": tftypes.Bool,
- "compute_instance_types": tftypes.List{ElementType: tftypes.String},
- "additional_instance_types": tftypes.List{ElementType: tftypes.String},
+ "custom_ami_id": tftypes.String,
+ "enable_spot_instances": tftypes.Bool,
+ "compute_instance_types": tftypes.List{ElementType: tftypes.String},
+ },
+ },
+ "default_database_catalog": tftypes.Object{
+ AttributeTypes: map[string]tftypes.Type{
+ "id": tftypes.String,
+ "name": tftypes.String,
+ "last_updated": tftypes.String,
+ "status": tftypes.String,
},
},
"polling_options": tftypes.Object{
@@ -250,6 +280,7 @@ func createRawClusterResource() tftypes.Value {
"cluster_id": tftypes.NewValue(tftypes.String, "id"),
"last_updated": tftypes.NewValue(tftypes.String, ""),
"status": tftypes.NewValue(tftypes.String, "Accepted"),
+ "version": tftypes.NewValue(tftypes.String, "1.9.4-b10"),
"node_role_cdw_managed_policy_arn": tftypes.NewValue(tftypes.String, ""),
"database_backup_retention_days": tftypes.NewValue(tftypes.Number, 0),
"custom_registry_options": tftypes.NewValue(tftypes.Object{
@@ -306,15 +337,27 @@ func createRawClusterResource() tftypes.Value {
"instance_settings": tftypes.NewValue(
tftypes.Object{
AttributeTypes: map[string]tftypes.Type{
- "custom_ami_id": tftypes.String,
- "enable_spot_instances": tftypes.Bool,
- "compute_instance_types": tftypes.List{ElementType: tftypes.String},
- "additional_instance_types": tftypes.List{ElementType: tftypes.String},
+ "custom_ami_id": tftypes.String,
+ "enable_spot_instances": tftypes.Bool,
+ "compute_instance_types": tftypes.List{ElementType: tftypes.String},
+ }}, map[string]tftypes.Value{
+ "custom_ami_id": tftypes.NewValue(tftypes.String, ""),
+ "enable_spot_instances": tftypes.NewValue(tftypes.Bool, false),
+ "compute_instance_types": tftypes.NewValue(tftypes.List{ElementType: tftypes.String}, []tftypes.Value{}),
+ },
+ ),
+ "default_database_catalog": tftypes.NewValue(
+ tftypes.Object{
+ AttributeTypes: map[string]tftypes.Type{
+ "id": tftypes.String,
+ "name": tftypes.String,
+ "last_updated": tftypes.String,
+ "status": tftypes.String,
}}, map[string]tftypes.Value{
- "custom_ami_id": tftypes.NewValue(tftypes.String, ""),
- "enable_spot_instances": tftypes.NewValue(tftypes.Bool, false),
- "compute_instance_types": tftypes.NewValue(tftypes.List{ElementType: tftypes.String}, []tftypes.Value{}),
- "additional_instance_types": tftypes.NewValue(tftypes.List{ElementType: tftypes.String}, []tftypes.Value{}),
+ "id": tftypes.NewValue(tftypes.String, ""),
+ "name": tftypes.NewValue(tftypes.String, ""),
+ "last_updated": tftypes.NewValue(tftypes.String, ""),
+ "status": tftypes.NewValue(tftypes.String, "Starting"),
},
),
"polling_options": tftypes.NewValue(
@@ -380,11 +423,23 @@ func (suite *DwClusterTestSuite) TestDwAwsClusterCreate_Success() {
EnvironmentCrn: "crn",
ID: "cluster-id",
Name: "test-name",
+ Status: "Running",
+ }}}
+
+ expectedDbcResponse := &operations.ListDbcsOK{
+ Payload: &models.ListDbcsResponse{
+ Dbcs: []*models.DbcSummary{
+ {
+ ID: "dbc-id",
+ Name: "dbc-name",
+ Status: "Running",
+ },
}}}
client := new(mocks.MockDwClientService)
client.On("CreateAwsCluster", mock.Anything).Return(suite.expectedCreateResponse, nil)
client.On("DescribeCluster", mock.Anything).Return(expectedDescribeResponse, nil)
+ client.On("ListDbcs", mock.Anything).Return(expectedDbcResponse, nil)
dwApi := NewDwApi(client)
req := resource.CreateRequest{
@@ -473,6 +528,7 @@ func (suite *DwClusterTestSuite) TestDwAwsClusterDeletion_Success() {
ctx := context.TODO()
client := new(mocks.MockDwClientService)
client.On("DeleteCluster", mock.Anything).Return(&operations.DeleteClusterOK{}, nil)
+ client.On("DescribeCluster", mock.Anything).Return(&operations.DescribeClusterOK{}, fmt.Errorf("NOT_FOUND"))
dwApi := NewDwApi(client)
req := resource.DeleteRequest{
@@ -530,7 +586,7 @@ func (suite *DwClusterTestSuite) TestStateRefresh_Success() {
callFailureThreshold := 3
// Function under test
- refresh := dwApi.stateRefresh(ctx, &clusterID, &callFailedCount, callFailureThreshold)
+ refresh := dwApi.clusterStateRefresh(ctx, &clusterID, &callFailedCount, callFailureThreshold)
_, status, err := refresh()
suite.NoError(err)
suite.Equal("Running", status)
@@ -548,7 +604,7 @@ func (suite *DwClusterTestSuite) TestStateRefresh_FailureThresholdReached() {
callFailureThreshold := 3
// Function under test
- refresh := dwApi.stateRefresh(ctx, &clusterID, &callFailedCount, callFailureThreshold)
+ refresh := dwApi.clusterStateRefresh(ctx, &clusterID, &callFailedCount, callFailureThreshold)
var err error
for i := 0; i <= callFailureThreshold; i++ {
_, _, err = refresh()
diff --git a/resources/dw/cluster/aws/schema_cluster.go b/resources/dw/cluster/aws/schema_cluster.go
index 7eb1feba..9ae63301 100644
--- a/resources/dw/cluster/aws/schema_cluster.go
+++ b/resources/dw/cluster/aws/schema_cluster.go
@@ -52,6 +52,10 @@ var dwClusterSchema = schema.Schema{
Computed: true,
MarkdownDescription: "The status of the cluster.",
},
+ "version": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The version of the cluster.",
+ },
"node_role_cdw_managed_policy_arn": schema.StringAttribute{
Optional: true,
MarkdownDescription: "The managed policy ARN to be attached to the created node instance role.",
@@ -83,8 +87,13 @@ var dwClusterSchema = schema.Schema{
},
"instance_settings": schema.SingleNestedAttribute{
Optional: true,
+ Computed: true,
Attributes: instanceSettings,
},
+ "default_database_catalog": schema.SingleNestedAttribute{
+ Computed: true,
+ Attributes: defaultDatabaseCatalogProperties,
+ },
"polling_options": schema.SingleNestedAttribute{
MarkdownDescription: "Polling related configuration options that could specify various values that will be used during CDP resource creation.",
Optional: true,
@@ -163,12 +172,27 @@ var instanceSettings = map[string]schema.Attribute{
},
"compute_instance_types": schema.ListAttribute{
Optional: true,
+ Computed: true,
ElementType: types.StringType,
MarkdownDescription: "The compute instance types that the environment is restricted to use. This affects the creation of virtual warehouses where this restriction will apply. Select an instance type that meets your computing, memory, networking, or storage needs. As of now, only a single instance type can be listed.",
},
- "additional_instance_types": schema.ListAttribute{
- Optional: true,
- ElementType: types.StringType,
- MarkdownDescription: "The additional instance types that the environment is allowed to use, listed in their priority order. They will be used instead of the primary compute instance type in case it is unavailable. You cannot include any instance type that was already indicated in computeInstanceTypes.",
+}
+
+var defaultDatabaseCatalogProperties = map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The ID of the database catalog.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The name of the database catalog.",
+ },
+ "last_updated": schema.StringAttribute{
+ Computed: true,
+		MarkdownDescription: "Timestamp of the last Terraform update of the database catalog.",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The status of the database catalog.",
},
}
diff --git a/resources/dw/databasecatalog/resource_catalog_test.go b/resources/dw/databasecatalog/resource_catalog_test.go
index 39b311b2..e7880537 100644
--- a/resources/dw/databasecatalog/resource_catalog_test.go
+++ b/resources/dw/databasecatalog/resource_catalog_test.go
@@ -37,6 +37,8 @@ import (
var testDatabaseCatalogSchema = schema.Schema{
MarkdownDescription: "Creates an AWS Data Warehouse database catalog.",
+ DeprecationMessage: "This resource is deprecated and will be removed in the next major release. The cluster resource " +
+ "default_database_catalog attribute will be used instead. All properties are computed, no need to specify them manually.",
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Computed: true,
diff --git a/resources/dw/databasecatalog/schema_catalog.go b/resources/dw/databasecatalog/schema_catalog.go
index dbe5a235..b1335290 100644
--- a/resources/dw/databasecatalog/schema_catalog.go
+++ b/resources/dw/databasecatalog/schema_catalog.go
@@ -21,6 +21,8 @@ import (
var dwDefaultDatabaseCatalogSchema = schema.Schema{
MarkdownDescription: "Creates an AWS Data Warehouse database catalog.",
+ DeprecationMessage: "This resource is deprecated and will be removed in the next major release. The cluster resource " +
+ "default_database_catalog attribute will be used instead. All properties are computed, no need to specify them manually.",
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Computed: true,
diff --git a/resources/dw/resource_dw_acc_test.go b/resources/dw/resource_dw_acc_test.go
index 5bcd42d1..97fcc0be 100644
--- a/resources/dw/resource_dw_acc_test.go
+++ b/resources/dw/resource_dw_acc_test.go
@@ -175,7 +175,6 @@ func TestAccDwCluster_Basic(t *testing.T) {
testAccAwsEnvironmentConfig(&envParams),
testAccAwsDataLakeConfig(&dlParams),
testAccAwsClusterBasicConfig(&envParams),
- testAccDwCatalog(),
testAccHiveVirtualWarehouse(cdpacctest.RandomShortWithPrefix("tf-hive")),
testAccImpalaVirtualWarehouse(cdpacctest.RandomShortWithPrefix("tf-impala")),
testAccDataVisualization(cdpacctest.RandomShortWithPrefix("tf-dataviz"))),
@@ -271,8 +270,8 @@ func testAccAwsDataLakeConfig(params *awsDataLakeTestParameters) string {
depends_on = [ cdp_environments_id_broker_mappings.test_idbm_dw_aws ]
}
`,
- params.DataAccessRole, params.RangerAuditRole, params.Name, params.AssumerRole, params.StorageLocationBase,
- params.Runtime)
+ params.DataAccessRole, params.RangerAuditRole, params.Name,
+ params.AssumerRole, params.StorageLocationBase, params.Runtime)
}
func testAccAwsClusterBasicConfig(params *awsEnvironmentTestParameters) string {
@@ -291,19 +290,11 @@ func testAccAwsClusterBasicConfig(params *awsEnvironmentTestParameters) string {
`, params.SubnetIds)
}
-func testAccDwCatalog() string {
- return `
- resource "cdp_dw_database_catalog" "test_catalog" {
- cluster_id = cdp_dw_aws_cluster.test_data_warehouse_aws.cluster_id
- }
- `
-}
-
func testAccHiveVirtualWarehouse(name string) string {
return fmt.Sprintf(`
resource "cdp_dw_vw_hive" "test_hive" {
cluster_id = cdp_dw_aws_cluster.test_data_warehouse_aws.cluster_id
- database_catalog_id = cdp_dw_database_catalog.test_catalog.id
+ database_catalog_id = cdp_dw_aws_cluster.test_data_warehouse_aws.default_database_catalog.id
name = %[1]q
group_size = 2
platform_jwt_auth = true
@@ -318,9 +309,9 @@ func testAccHiveVirtualWarehouse(name string) string {
aws_options = {
availability_zone = "us-west-2a"
ebs_llap_spill_gb = 300
- tags = {
- owner = "cdw-terraform@cloudera.com"
- }
+ tags = {
+ "made-with": "CDP Terraform Provider"
+ }
}
}
`, name)
@@ -354,7 +345,7 @@ func testAccImpalaVirtualWarehouse(name string) string {
return fmt.Sprintf(`
resource "cdp_dw_vw_impala" "test_impala" {
cluster_id = cdp_dw_aws_cluster.test_data_warehouse_aws.cluster_id
- database_catalog_id = cdp_dw_database_catalog.test_catalog.id
+ database_catalog_id = cdp_dw_aws_cluster.test_data_warehouse_aws.default_database_catalog.id
name = %[1]q
}
`, name)