Skip to content

Commit

Permalink
Add cluster resource v2 for PoC
Browse files Browse the repository at this point in the history
  • Loading branch information
bobbyiliev committed Mar 19, 2024
1 parent f40af7d commit 28e7225
Show file tree
Hide file tree
Showing 6 changed files with 431 additions and 26 deletions.
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ require (
github.com/hashicorp/terraform-exec v0.20.0 // indirect
github.com/hashicorp/terraform-json v0.21.0 // indirect
github.com/hashicorp/terraform-plugin-framework v1.6.1 // indirect
github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 // indirect
github.com/hashicorp/terraform-plugin-go v0.22.0 // indirect
github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
github.com/hashicorp/terraform-plugin-mux v0.15.0 // indirect
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,8 @@ github.com/hashicorp/terraform-plugin-docs v0.18.0 h1:2bINhzXc+yDeAcafurshCrIjtd
github.com/hashicorp/terraform-plugin-docs v0.18.0/go.mod h1:iIUfaJpdUmpi+rI42Kgq+63jAjI8aZVTyxp3Bvk9Hg8=
github.com/hashicorp/terraform-plugin-framework v1.6.1 h1:hw2XrmUu8d8jVL52ekxim2IqDc+2Kpekn21xZANARLU=
github.com/hashicorp/terraform-plugin-framework v1.6.1/go.mod h1:aJI+n/hBPhz1J+77GdgNfk5svW12y7fmtxe/5L5IuwI=
github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc=
github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg=
github.com/hashicorp/terraform-plugin-go v0.22.0 h1:1OS1Jk5mO0f5hrziWJGXXIxBrMe2j/B8E+DVGw43Xmc=
github.com/hashicorp/terraform-plugin-go v0.22.0/go.mod h1:mPULV91VKss7sik6KFEcEu7HuTogMLLO/EvWCuFkRVE=
github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
Expand Down
17 changes: 14 additions & 3 deletions pkg/provider/framework_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ type MaterializeProvider struct {
client *utils.ProviderMeta
}

type providerData struct {
type providerModelV0 struct {
Endpoint types.String `tfsdk:"endpoint"`
CloudEndpoint types.String `tfsdk:"cloud_endpoint"`
BaseEndpoint types.String `tfsdk:"base_endpoint"`
Expand Down Expand Up @@ -95,15 +95,17 @@ func (p *MaterializeProvider) DataSources(ctx context.Context) []func() datasour

// Configure implements the logic from your providerConfigure function adapted for the Plugin Framework
func (p *MaterializeProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) {
var config providerData
var config providerModelV0

diags := req.Config.Get(ctx, &config)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}

// Extracting values from providerData or falling back to environment variables
log.Printf("[DEBUG] Provider configuration: %+v\n", config)

// Extracting values from providerModelV0 or falling back to environment variables
password := config.Password.ValueString()
if password == "" {
password = os.Getenv("MZ_PASSWORD")
Expand Down Expand Up @@ -227,6 +229,10 @@ func (p *MaterializeProvider) Configure(ctx context.Context, req provider.Config

log.Printf("[DEBUG] Initialized DB clients for regions: %v\n", dbClients)

if resp.Diagnostics.HasError() {
return
}

// Store the configured values in the provider instance for later use
p.client = &utils.ProviderMeta{
DB: dbClients,
Expand All @@ -235,4 +241,9 @@ func (p *MaterializeProvider) Configure(ctx context.Context, req provider.Config
DefaultRegion: clients.Region(defaultRegion),
RegionsEnabled: regionsEnabled,
}
providerData := &utils.ProviderData{
Client: p.client,
}
resp.DataSourceData = providerData
resp.ResourceData = providerData
}
200 changes: 177 additions & 23 deletions pkg/resources/resource_cluster_new.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,63 +2,217 @@ package resources

import (
"context"
"fmt"
"log"
"strings"

"github.com/MaterializeInc/terraform-provider-materialize/pkg/materialize"
"github.com/MaterializeInc/terraform-provider-materialize/pkg/utils"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/types"
)

// clusterResource implements the materialize_cluster_2 resource using the
// Terraform Plugin Framework.
type clusterResource struct {
	// client holds the provider-configured connections and region metadata,
	// populated in Configure.
	client *utils.ProviderData
}

// NewClusterResource returns a fresh, unconfigured cluster resource instance.
func NewClusterResource() resource.Resource {
	return &clusterResource{}
}

func (r *ClusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
func (r *clusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = "materialize_cluster_2"
// resp.TypeName = req.ProviderTypeName + "_cluster_2"
}

func (r *ClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
resp.Schema = schema.Schema{
Attributes: map[string]schema.Attribute{
"name": schema.StringAttribute{
Description: "The name of the cluster.",
Required: true,
// Validator: validateClusterName,
// ClusterStateModelV0 is the Terraform plan/state model for the
// materialize_cluster_2 resource (Plugin Framework PoC). Field tags map
// each attribute to its schema name in ClusterSchema.
type ClusterStateModelV0 struct {
	ID types.String `tfsdk:"id"`
	Name types.String `tfsdk:"name"`
	// Managed-cluster sizing options; only meaningful when Size is set.
	Size types.String `tfsdk:"size"`
	ReplicationFactor types.Int64 `tfsdk:"replication_factor"`
	Disk types.Bool `tfsdk:"disk"`
	AvailabilityZones types.List `tfsdk:"availability_zones"`
	IntrospectionInterval types.String `tfsdk:"introspection_interval"`
	IntrospectionDebugging types.Bool `tfsdk:"introspection_debugging"`
	IdleArrangementMergeEffort types.Int64 `tfsdk:"idle_arrangement_merge_effort"`
	OwnershipRole types.String `tfsdk:"ownership_role"`
	Comment types.String `tfsdk:"comment"`
	// Region selects which regional DB client to use for this cluster.
	Region types.String `tfsdk:"region"`
}

// ClusterSchema returns the attribute map for the materialize_cluster_2
// resource. Most attributes are built by shared New*Schema helpers so their
// descriptions and validators stay consistent across resources.
func ClusterSchema() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		"id": schema.StringAttribute{
			Computed: true,
			MarkdownDescription: "The Cluster ID",
			// Keep the known ID across plans so it is not shown as unknown.
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"name": NewObjectNameSchema("cluster", true, true),
		"comment": NewCommentSchema(false),
		"ownership_role": NewOwnershipRoleSchema(),
		// The string slices passed below name attributes this one conflicts
		// with or requires — presumably enforced by the helpers; verify there.
		"size": NewSizeSchema("managed cluster", false, false, []string{"replication_factor", "availability_zones"}),
		"replication_factor": NewReplicationFactorSchema(),
		"disk": NewDiskSchema(false),
		"availability_zones": NewAvailabilityZonesSchema(),
		"introspection_interval": NewIntrospectionIntervalSchema(false, []string{"size"}),
		"introspection_debugging": NewIntrospectionDebuggingSchema(false, []string{"size"}),
		"idle_arrangement_merge_effort": NewIdleArrangementMergeEffortSchema(false, []string{"size"}),
		"region": NewRegionSchema(),
	}
}

// Implement other necessary methods (Create, Read, Update, Delete)...
// Implement Create method to store the cluster name in the state.
func (r *ClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
var state struct {
Name types.String `tfsdk:"name"`
// Schema wires the shared ClusterSchema attribute map into this resource.
func (r *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: ClusterSchema(),
	}
}

// Configure stores the provider-level client on the resource so that the
// CRUD methods can reach the configured database connections.
func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}

	// log.Printf already formats with %+v; wrapping the value in
	// fmt.Sprintf("%+v", ...) just double-formatted the same string.
	log.Printf("[DEBUG] ProviderData contents: %+v\n", req.ProviderData)

	client, ok := req.ProviderData.(*utils.ProviderData)
	if !ok {
		// The diagnostic previously named *utils.ProviderMeta, but the type
		// actually asserted above is *utils.ProviderData.
		resp.Diagnostics.AddError(
			"Unexpected Resource Configure Type",
			fmt.Sprintf("Expected *utils.ProviderData, got: %T. Please report this issue to the provider developers.", req.ProviderData),
		)
		return
	}

	r.client = client
}

// Create provisions a new Materialize cluster from the planned configuration
// and persists the resulting resource state (including the region-qualified ID).
func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	// Initialize and retrieve values from the request's plan.
	var state ClusterStateModelV0
	diags := req.Plan.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Resolve the DB client for the requested region (or the default region
	// when the attribute is unset).
	metaDb, region, err := utils.NewGetDBClientFromMeta(r.client, state.Region.ValueString())
	if err != nil {
		resp.Diagnostics.AddError("Failed to get DB client", err.Error())
		return
	}

	o := materialize.MaterializeObject{ObjectType: "CLUSTER", Name: state.Name.ValueString()}
	b := materialize.NewClusterBuilder(metaDb, o)

	// Managed cluster options only apply when a size is configured.
	if !state.Size.IsNull() {
		size := state.Size.ValueString()
		b.Size(size)

		if !state.ReplicationFactor.IsNull() {
			// Named rf (not r) to avoid shadowing the method receiver.
			rf := int(state.ReplicationFactor.ValueInt64())
			b.ReplicationFactor(&rf)
		}

		if strings.HasSuffix(size, "cc") || strings.HasSuffix(size, "C") {
			// DISK option not supported for cluster sizes ending in cc or C;
			// disk is always enabled for those sizes.
			log.Printf("[WARN] disk option not supported for cluster size %s, disk is always enabled", size)
			b.Disk(true)
		} else if !state.Disk.IsNull() {
			b.Disk(state.Disk.ValueBool())
		}

		if !state.AvailabilityZones.IsNull() && len(state.AvailabilityZones.Elements()) > 0 {
			zones := make([]string, len(state.AvailabilityZones.Elements()))
			for i, elem := range state.AvailabilityZones.Elements() {
				zones[i] = elem.(types.String).ValueString()
			}
			b.AvailabilityZones(zones)
		}

		if !state.IntrospectionInterval.IsNull() {
			b.IntrospectionInterval(state.IntrospectionInterval.ValueString())
		}

		if !state.IntrospectionDebugging.IsNull() && state.IntrospectionDebugging.ValueBool() {
			b.IntrospectionDebugging()
		}

		if !state.IdleArrangementMergeEffort.IsNull() {
			b.IdleArrangementMergeEffort(int(state.IdleArrangementMergeEffort.ValueInt64()))
		}
	}

	// Create the resource.
	if err := b.Create(); err != nil {
		resp.Diagnostics.AddError("Failed to create the cluster", err.Error())
		return
	}

	// Ownership.
	// TODO: Fix failing error
	// if !state.OwnershipRole.IsNull() {
	// 	ownership := materialize.NewOwnershipBuilder(metaDb, o)
	// 	if err := ownership.Alter(state.OwnershipRole.ValueString()); err != nil {
	// 		log.Printf("[DEBUG] resource failed ownership, dropping object: %s", o.Name)
	// 		b.Drop()
	// 		resp.Diagnostics.AddError("Failed to set ownership", err.Error())
	// 		return
	// 	}
	// }

	// Object comment.
	if !state.Comment.IsNull() {
		comment := materialize.NewCommentBuilder(metaDb, o)
		if err := comment.Object(state.Comment.ValueString()); err != nil {
			// Roll back the half-created cluster so it is not leaked.
			log.Printf("[DEBUG] resource failed comment, dropping object: %s", o.Name)
			b.Drop()
			resp.Diagnostics.AddError("Failed to add comment", err.Error())
			return
		}
	}

	// Look up the server-assigned cluster ID and qualify it with the region.
	i, err := materialize.ClusterId(metaDb, o)
	if err != nil {
		resp.Diagnostics.AddError("Failed to set resource ID", err.Error())
		return
	}
	state.ID = types.StringValue(utils.TransformIdWithRegion(string(region), i))

	// BUG FIX: the state was never written back to the response (the code
	// re-appended the already-consumed plan diags instead), so Terraform
	// would finish the apply with no state for the new cluster. Persist it.
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
}

func (r *ClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
// Implementation for Read operation
}

func (r *ClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
// Implementation for Update operation
}

func (r *ClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
// Implementation for Delete operation
}
Loading

0 comments on commit 28e7225

Please sign in to comment.