Add IPPool and LoadBalancer resources #102

Open · wants to merge 9 commits into master
1 change: 1 addition & 0 deletions Dockerfile.dapper
@@ -20,6 +20,7 @@ RUN wget --quiet https://github.com/docker/buildx/releases/download/v0.13.1/buil
mv buildx-v0.13.1.linux-${ARCH} /usr/local/bin/buildx && \
mv terraform /usr/local/bin/terraform

ENV DAPPER_RUN_ARGS="--network host"
ENV DAPPER_ENV REPO TAG DRONE_TAG
ENV DAPPER_SOURCE /go/src/github.com/harvester/terraform-provider-harvester
ENV DAPPER_OUTPUT ./bin ./dist
9 changes: 8 additions & 1 deletion docs/resources/virtualmachine.md
@@ -211,7 +211,7 @@ resource "harvester_virtualmachine" "opensuse154" {
- **secure_boot** (Boolean) EFI must be enabled to use this feature
- **ssh_keys** (List of String)
- **start** (Boolean, Deprecated)
- **tags** (Map of String)
- **tags** (Map of String) (also known as labels; see [below](#tags))
- **tpm** (Block List, Max: 1) (see [below for nested schema](#nestedblock--tpm))

### Read-Only
@@ -299,6 +299,13 @@ Optional:

- **name** (String) just add this field for doc generation

<a id="tags"></a>
### Tags / Labels

Optional map of strings. Each key of the map appears on the virtual machine
as a label named `tags.harvesterhci.io/${KEY}`, and the corresponding map
value becomes the value of that label.
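
For example, a minimal sketch (the resource name and tag values are illustrative, and other required virtual machine arguments are omitted):

```hcl
resource "harvester_virtualmachine" "tagged" {
  # These tags are expected to appear on the virtual machine as the labels
  #   tags.harvesterhci.io/app  = "web"
  #   tags.harvesterhci.io/tier = "frontend"
  tags = {
    app  = "web"
    tier = "frontend"
  }
}
```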

## Import

Import is supported using the following syntax:
10 changes: 10 additions & 0 deletions examples/data-sources/harvester_ippool/data-source.tf
@@ -0,0 +1,10 @@
data "harvester_ippool" "service_ips" {
name = "service_ips"

range {
start = "192.168.0.1"
end = "192.168.0.254"
subnet = "192.168.0.1/24"
gateway = "192.168.0.1"
}
}
27 changes: 27 additions & 0 deletions examples/resources/harvester_ippool/resources.tf
@@ -0,0 +1,27 @@
resource "harvester_ippool" "service_ips" {
name = "service-ips"

range {
start = "10.11.0.1"
end = "10.11.0.254"
subnet = "10.11.0.1/24"
gateway = "10.11.0.1"
}

range {
start = "10.12.0.1"
end = "10.12.0.254"
subnet = "10.12.0.1/24"
gateway = "10.12.0.1"
}

selector {
priority = 100
network = "vm-network"
scope {
project = "services"
namespace = "prod-default"
guest_cluster = "prod-services"
}
}
}
47 changes: 47 additions & 0 deletions examples/resources/harvester_loadbalancer/resources.tf
@@ -0,0 +1,47 @@
resource "harvester_loadbalancer" "service_loadbalancer" {
name = "service-loadbalancer"

# This ensures the resources are created in the correct order.
# The loadbalancer resource is rejected by the admission webhook unless at
# least one virtual machine with labels matching the backend_selector(s)
# already exists. This dependency makes it possible to create that virtual
# machine from the same Terraform configuration; a sketch of such a machine
# follows this resource.
depends_on = [
harvester_virtualmachine.name
]

listener {
port = 443
protocol = "tcp"
backend_port = 8080
}

listener {
port = 80
protocol = "tcp"
backend_port = 8080
}

ipam = "ippool"
ippool = "service-ips"

workload_type = "vm"

backend_selector {
key = "app"
values = [ "test" ]
}

backend_selector {
key = "component"
values = [ "frontend", "ui" ]
}

healthcheck {
port = 443
success_threshold = 1
failure_threshold = 3
period_seconds = 10
timeout_seconds = 5
}
}
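
For completeness, a minimal sketch of the virtual machine that the `depends_on` above refers to. The resource address `harvester_virtualmachine.name` is taken from the example; the tag keys and values, and the assumption that they surface as labels the `backend_selector` blocks can match, are illustrative only:

resource "harvester_virtualmachine" "name" {
  name = "backend-vm"

  # Assumption: these tags end up on the virtual machine as labels that the
  # backend_selector blocks above (app in ["test"], component in
  # ["frontend", "ui"]) can match, so the admission webhook accepts the
  # load balancer once this machine exists.
  tags = {
    app       = "test"
    component = "frontend"
  }

  # Other required virtual machine arguments (cpu, memory, disks, network
  # interfaces, ...) are omitted for brevity.
}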
3 changes: 2 additions & 1 deletion go.mod
@@ -29,7 +29,9 @@ replace (
)

require (
github.com/google/uuid v1.6.0
github.com/harvester/harvester v1.3.2
github.com/harvester/harvester-load-balancer v0.3.0
github.com/harvester/harvester-network-controller v0.5.4
github.com/hashicorp/terraform-plugin-docs v0.4.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.6.1
@@ -101,7 +103,6 @@ require (
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -2083,6 +2083,8 @@ github.com/harvester/go-common v0.0.0-20240627083535-c1208a490f89 h1:wqQaZqxxOQr
github.com/harvester/go-common v0.0.0-20240627083535-c1208a490f89/go.mod h1:70bhzIm0iXpR1J8hr5jg2jzAvNkM4f2ZsU1R4Sbx9X4=
github.com/harvester/harvester v1.3.2 h1:kKC8tL+wygB6U8s5q3YUtkJyr3uJmbR8spyw9OEC34A=
github.com/harvester/harvester v1.3.2/go.mod h1:dCT/UePTJTW3QFzwyRfkhGGyr5ts0ZvkX/MsOlP2GdA=
github.com/harvester/harvester-load-balancer v0.3.0 h1:R4ymvCTFraic+zIMNYFPtIf8KAR3S9dnHdPRgHn85gk=
github.com/harvester/harvester-load-balancer v0.3.0/go.mod h1:bbsdXuGkVeoMuzb2yLrrc8RXmm4hpIe9zdoreziUUaA=
github.com/harvester/harvester-network-controller v0.5.4 h1:EmVOo1M4n+TXFKoh4lwdx+pzaXZg5lE4XYUACUD1kMU=
github.com/harvester/harvester-network-controller v0.5.4/go.mod h1:IeAcGckQpEEXq6CpXgm9sUrOJmqEkdeU2+DFoxbL084=
github.com/harvester/node-manager v0.1.5-0.20230614075852-de2da3ef3aca h1:ySmceYltYR2wmeAvifoXtHQOK/N6v7z7Rkaq/6IPJ5A=
34 changes: 34 additions & 0 deletions internal/provider/ippool/datasource_ippool.go
@@ -0,0 +1,34 @@
package ippool

import (
"context"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/harvester/terraform-provider-harvester/pkg/client"
"github.com/harvester/terraform-provider-harvester/pkg/constants"
)

func DataSourceIPPool() *schema.Resource {
return &schema.Resource{
ReadContext: dataSourceIPPoolRead,
Schema: DataSourceSchema(),
}
}

func dataSourceIPPoolRead(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*client.Client)
name := data.Get(constants.FieldCommonName).(string)

ippool, err := c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Get(ctx, name, metav1.GetOptions{})
if err != nil {
return diag.FromErr(err)
}
return diag.FromErr(resourceIPPoolImport(data, ippool))
}
131 changes: 131 additions & 0 deletions internal/provider/ippool/resource_ippool.go
@@ -0,0 +1,131 @@
package ippool

import (
"context"
"time"

loadbalancerv1 "github.com/harvester/harvester-load-balancer/pkg/apis/loadbalancer.harvesterhci.io/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

"github.com/harvester/terraform-provider-harvester/internal/util"
"github.com/harvester/terraform-provider-harvester/pkg/client"
"github.com/harvester/terraform-provider-harvester/pkg/constants"
"github.com/harvester/terraform-provider-harvester/pkg/helper"
"github.com/harvester/terraform-provider-harvester/pkg/importer"
)

func ResourceIPPool() *schema.Resource {
return &schema.Resource{
CreateContext: resourceIPPoolCreate,
ReadContext: resourceIPPoolRead,
UpdateContext: resourceIPPoolUpdate,
DeleteContext: resourceIPPoolDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Schema: Schema(),
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(2 * time.Minute),
Read: schema.DefaultTimeout(2 * time.Minute),
Update: schema.DefaultTimeout(2 * time.Minute),
Delete: schema.DefaultTimeout(2 * time.Minute),
Default: schema.DefaultTimeout(2 * time.Minute),
},
}
}

func resourceIPPoolCreate(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*client.Client)
name := data.Get(constants.FieldCommonName).(string)
toCreate, err := util.ResourceConstruct(data, Creator(name))
if err != nil {
return diag.FromErr(err)
}
ippool, err := c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Create(ctx, toCreate.(*loadbalancerv1.IPPool), metav1.CreateOptions{})
if err != nil {
return diag.FromErr(err)
}
data.SetId(helper.BuildID("", name))
return diag.FromErr(resourceIPPoolImport(data, ippool))
}

func resourceIPPoolRead(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*client.Client)
_, name, err := helper.IDParts(data.Id())
if err != nil {
return diag.FromErr(err)
}

ippool, err := c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Get(ctx, name, metav1.GetOptions{})
if err != nil {
return diag.FromErr(err)
}

return diag.FromErr(resourceIPPoolImport(data, ippool))
}

func resourceIPPoolUpdate(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*client.Client)
_, name, err := helper.IDParts(data.Id())
if err != nil {
return diag.FromErr(err)
}

obj, err := c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Get(ctx, name, metav1.GetOptions{})
if err != nil {
return diag.FromErr(err)
}

toUpdate, err := util.ResourceConstruct(data, Updater(obj))
if err != nil {
return diag.FromErr(err)
}

ippool, err := c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Update(ctx, toUpdate.(*loadbalancerv1.IPPool), metav1.UpdateOptions{})
if err != nil {
return diag.FromErr(err)
}
return diag.FromErr(resourceIPPoolImport(data, ippool))
}

func resourceIPPoolDelete(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*client.Client)
_, name, err := helper.IDParts(data.Id())
if err != nil {
return diag.FromErr(err)
}

err = c.HarvesterLoadbalancerClient.
LoadbalancerV1beta1().
IPPools().
Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return diag.FromErr(err)
}

return diag.FromErr(nil)
}

func resourceIPPoolImport(data *schema.ResourceData, obj *loadbalancerv1.IPPool) error {
stateGetter, err := importer.ResourceIPPoolStateGetter(obj)
if err != nil {
return err
}
return util.ResourceStatesSet(data, stateGetter)
}